
[Qt][LIBAV] Refer from FFMPEG 3.0.2.
author	K.Ohta <whatisthis.sowhat@gmail.com>
Sat, 28 May 2016 19:43:07 +0000 (04:43 +0900)
committer	K.Ohta <whatisthis.sowhat@gmail.com>
Sat, 28 May 2016 19:43:07 +0000 (04:43 +0900)
source/src/qt/avio/movie_saver.cpp
source/src/qt/avio/movie_saver.h
source/src/qt/avio/movie_saver_audio.cpp
source/src/qt/avio/movie_saver_fileio.cpp

index 203cc12..1478aad 100644 (file)
@@ -114,26 +114,31 @@ void MOVIE_SAVER::run()
        bRunThread = true;
        //AGAR_DebugLog(AGAR_LOG_DEBUG, "MOVIE THREAD: Start");
 #if defined(USE_LIBAV)
-    AVCodecContext *c;
-    AVPacket pkt = { 0 }; // data and size must be 0;
-    AVFrame *frame;
+    AVFormatContext *os;
+    OutputStream *ost;
+    InputStream *ist;
 #endif 
     int ret;
     int got_packet;
     int dst_nb_samples;
 
-       int fps_wait = (int)((1000.0 / p_osd->vm_frame_rate()) / 2.0);
+       int fps_wait = (int)((1000.0 / p_osd->vm_frame_rate()) / 4.0);
        int tmp_wait = fps_wait;
        int ncount_audio = 0;
        int ncount_video = 0;
-       int64_t audio_remain = 0;
-       int64_t video_remain = 0;
-       uint32_t audio_offset = 0;
-       uint32_t audio_frame_offset = 0;
-       uint32_t video_offset = 0;
-       bool audio_continue;
-       bool video_continue;
-
+       bool audio_continue = false;
+       bool video_continue = false;
+       bool need_audio_transcode = false;
+       bool need_video_transcode = false;
+       int i;
+    int64_t total_packets_written = 0;
+       
+       audio_remain = 0;
+       video_remain = 0;
+       audio_offset = 0;
+       audio_frame_offset = 0;
+       video_offset = 0;
+       
 #if defined(USE_LIBAV)
        av_init_packet(&pkt);
 #endif
@@ -141,102 +146,73 @@ void MOVIE_SAVER::run()
        while(bRunThread) {
                if(recording) {
                        if(!bRunThread) break;
-                       if(!audio_data_queue.isEmpty()) {
-                               bool f = false;
-                               if(audio_remain <= 0) {
-                                       f = true;
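+                       // Recording has just been switched on: set up the FFmpeg muxer/encoders via transcode_init() and reset the buffer offsets.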
+                       if(old_recording != recording) {
+                               if(_filename.isEmpty()) {
+                                       goto _next_turn;
                                }
-                               if(f || audio_continue) {
-                                       f = dequeue_audio(audio_frame);
-                                       audio_offset = 0;
-                                       audio_remain = audio_size;
-                               }
-                               audio_continue = false;
-                               if(f || (audio_remain > 0)) {
-#if defined(USE_LIBAV)
-                                       uint64_t bytes;
-                                       uint64_t us;
-                                       double samples;
-                                       int ret;
-                                       int16_t *ptr = audio_frame;
-                                       int16_t *optr;
-                                       
-                                       if(audio_remain <= 0) {
-                                               bytes = audio_size;
-                                               audio_remain = bytes;
-                                       } else {
-                                               bytes = audio_remain;
-                                       }
-                                       us = (uint64_t)floor(((double)bytes * 1000000.0) / (double)audio_codec_context->sample_rate);
-                                       samples = ((double)us / 1000000.0) * (double)audio_codec_context->sample_rate;
-                                       AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver: Write audio data %d bytes", bytes);
-                                       
-                                       if(bytes == 0) goto _video;
-
-                                       {
-                                               frame = audio_tmp_frame;
-                                               if (av_compare_ts(audio_next_pts, audio_stream->codec->time_base,
-                                                                                 (double)fps_wait, (AVRational){ 1, 1 }) >= 0) {
-                                                       //audio_continue = false;
-                                                       goto _video;
-                                               }
-                                               optr = (int16_t *)frame->data;
-                                               for(int j = audio_frame_offset; j < frame->nb_samples; j++) {
-                                                       if(audio_remain <= 0) {
-                                                               audio_continue = true;
-                                                               goto _video;
-                                                       }
-                                                       if(audio_offset >= audio_size) {
-                                                               audio_offset = 0;
-                                                               audio_continue = true;
-                                                               goto _video;
-                                                       }
-                                                       for(int k = 0; k < audio_stream->codec->channels; k++) {
-                                                               optr[(audio_frame_offset * audio_stream->codec->channels)+ k] = ptr[audio_offset + k];
-                                                       }
-                                                       audio_offset += audio_stream->codec->channels;
-                                                       audio_remain -= audio_stream->codec->channels;
-                                                       audio_frame_offset++;
-                                               }
-                                               frame->pts = audio_next_pts;
-                                               audio_next_pts += frame->nb_samples;
-                                       }
-                                       audio_frame_offset = 0;
-                                       //if(!audio_resample((void *)audio_frame_data)) {
-                                       //      do_close();
-                                       //      audio_continue = false;
-                                       //      goto _video;
-                                       //}
-                                       av_init_packet(&pkt);
-                                       ret = avcodec_encode_audio2(audio_codec_context, &pkt, frame, &got_packet);
-                                       if (ret < 0) {
-                                               AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver : Error encoding audio frame\n");
-                                               do_close();
-                                               audio_continue = false;
-                                               goto _video;
-                                       }
-                                       if (got_packet) {
-                                               ret = write_audio_frame((const void *)(&audio_codec_context->time_base), (void *)(&pkt));
-                                               if (ret < 0) {
-                                                       AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Audio Error while writing audio frame\n");
-                                                       do_close();
-                                                       audio_continue = false;
-                                                       goto _video;
-                                               }
-                                       }
-#endif // defined(USE_LIBAV)
-                                       totalAudioFrame++;
-                                       ncount_audio++;
+                               ret = transcode_init();
+                               if (ret < 0) {
+                                       goto _final;
                                }
+                               timer_start = av_gettime_relative();
+                               audio_remain = 0;
+                               video_remain = 0;
+                               audio_offset = 0;
+                               audio_frame_offset = 0;
+                               video_frame_offset = 0;
+                               video_offset = 0;
+                                               
+                       }
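+                       // Refill the audio PCM buffer from the queue once the previous chunk has been consumed.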
+                       if(audio_remain <= 0) {
+                               if(audio_data_queue.isEmpty()) goto _video;
+                               dequeue_audio(audio_frame_buf);
+                               audio_remain = audio_size;
+                               audio_offset = 0;
+                               need_audio_transcode = true;
                        }
                _video:
-                       if(0) {
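+                       // Likewise refill the video frame buffer from the queues once the previous frame has been consumed.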
+                       if(video_remain <= 0) {
+                               if(!video_data_queue.isEmpty() && !video_width_queue.isEmpty()
+                                  && !video_height_queue.isEmpty()) {
+                                       dequeue_video(video_frame_buf);
+                                       video_remain = video_size;
+                                       video_offset = 0;
+                                       need_video_transcode = true;
+                               }
+                       }
+                       //int64_t cur_time= av_gettime_relative();
+
+                       /* if 'q' pressed, exits */
+                       //if (stdin_interaction)
+            //if (check_keyboard_interaction(cur_time) < 0)
+            //    break;
+
+                       /* check if there's any stream where output is still needed */
+                       if (!need_output()) {
+                               AGAR_DebugLog(AGAR_LOG_INFO, "No more output streams to write to, finishing.\n");
+                               goto _final;
                        }
-               }
-               if(ncount_audio > 10) { 
-                       ncount_audio = 0;
-               }
+
+                       //if(!need_audio_transcode || !need_video_transcode) goto _next_turn;
                
+                       if(!need_audio_transcode) goto _next_turn;
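+                       // One step of the FFmpeg-style transcode loop: choose_output() picks the stream to feed, then filtered frames are reaped and muxed.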
+                       ret = transcode_step();
+                       need_audio_transcode = false;
+                       need_video_transcode = false;
+
+                       if (ret < 0 && ret != AVERROR_EOF) {
+                               char errbuf[128];
+                               av_strerror(ret, errbuf, sizeof(errbuf));
+                               AGAR_DebugLog(AGAR_LOG_INFO, "Error while filtering: %s\n", errbuf);
+                               goto _final;
+                       }
+
+                       /* dump report by using the output first video and audio streams */
+                       //print_report(0, timer_start, cur_time);
+               }
+       _next_turn:
+               if(!bRunThread) break;
+                       
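+               // Sleep in quarter-frame-period slices so the loop can poll the audio/video queues several times per emulated frame.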
                if(fps_wait >= tmp_wait) {
                        this->msleep(tmp_wait);
                        tmp_wait = 0;
@@ -245,34 +221,76 @@ void MOVIE_SAVER::run()
                        tmp_wait -= fps_wait;
                }
                if(tmp_wait <= 0) {
-                       fps_wait = (int)((1000.0 / p_osd->vm_frame_rate()) / 2.0);
+                       fps_wait = (int)((1000.0 / p_osd->vm_frame_rate()) / 4.0);
                        tmp_wait = fps_wait;
                }
+               old_recording = recording;
+               continue;
+       _final:
+               do_close();
+               old_recording = false;
        }
+       do_close();
 }
 
 void MOVIE_SAVER::do_close()
 {
        int i;
 #if defined(USE_LIBAV)
-       if(output_context != NULL) {
-               av_write_trailer(output_context);
-               avio_close(output_context->pb);
-               //av_freep(&output_context->pb);
-       }
-       if(audio_stream != NULL) {
-               av_free(audio_stream);
-       }
-       if(video_stream != NULL) {
-               av_free(video_stream);
-       }
-       audio_stream = NULL;
-       video_stream = NULL;
+    int ret;
+    AVFormatContext *os;
+    OutputStream *ost;
+    //InputStream *ist;
+    int64_t total_packets_written = 0;
+    /* at the end of stream, we must flush the decoder buffers */
+    //for (i = 0; i < nb_input_streams; i++) {
+        //ist = input_streams[i];
+        //if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
+               //process_input_packet(ist, NULL, 0);
+                       //}
+    //}
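+    /* Drain any frames still buffered in the encoders so their packets reach the muxer before the trailer is written. */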
+    flush_encoders();
 
-       if(output_context != NULL) {
-               av_free(output_context);
-               output_context = NULL;
-       }
+    //term_exit();
+
+    /* write the trailer if needed and close file */
+    for (i = 0; i < nb_output_files; i++) {
+        os = output_files[i]->ctx;
+        if ((ret = av_write_trailer(os)) < 0) {
+            AGAR_DebugLog(AGAR_LOG_INFO, "Movie/Saver Error writing trailer of %s: %s", os->filename, (const char *)av_err2str(ret));
+            return;
+            //if (exit_on_error)
+            //    exit_program(1);
+        }
+    }
+
+    /* dump report by using the first video and audio streams */
+    //print_report(1, timer_start, av_gettime_relative());
+
+    /* close each encoder */
+    for (i = 0; i < nb_output_streams; i++) {
+        ost = output_streams[i];
+        if (ost->encoding_needed) {
+            av_freep(&ost->enc_ctx->stats_in);
+        }
+        total_packets_written += ost->packets_written;
+    }
+
+    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
+        AGAR_DebugLog(AGAR_LOG_INFO, "Empty output\n");
+               return;
+        //exit_program(1);
+    }
+
+    /* close each decoder */
+    //for (i = 0; i < nb_input_streams; i++) {
+        //ist = input_streams[i];
+        //if (ist->decoding_needed) {
+        //    avcodec_close(ist->dec_ctx);
+        //    if (ist->hwaccel_uninit)
+        //        ist->hwaccel_uninit(ist->dec_ctx);
+        //}
+    //}
 #endif   // defined(USE_LIBAV)
        recording = false;
 
index 5a253ad..a1299ce 100644 (file)
@@ -25,6 +25,326 @@ extern "C" {
 }
 #endif
 
+// Copy from FFMPEG-3.0.2.
+typedef struct OutputStream {
+    int file_index;          /* file index */
+    int index;               /* stream index in the output file */
+    int source_index;        /* InputStream index */
+    AVStream *st;            /* stream in the output file */
+    int encoding_needed;     /* true if encoding needed for this stream */
+    int frame_number;
+    /* input pts and corresponding output pts
+       for A/V sync */
+    struct InputStream *sync_ist; /* input stream to sync against */
+    int64_t sync_opts;       /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
+    /* pts of the first frame encoded for this stream, used for limiting
+     * recording time */
+    int64_t first_pts;
+    /* dts of the last packet sent to the muxer */
+    int64_t last_mux_dts;
+    AVBitStreamFilterContext *bitstream_filters;
+    AVCodecContext *enc_ctx;
+    AVCodec *enc;
+    int64_t max_frames;
+    AVFrame *filtered_frame;
+    AVFrame *last_frame;
+    int last_dropped;
+    int last_nb0_frames[3];
+
+    void  *hwaccel_ctx;
+
+    /* video only */
+    AVRational frame_rate;
+    int is_cfr;
+    int force_fps;
+    int top_field_first;
+    int rotate_overridden;
+
+    AVRational frame_aspect_ratio;
+
+    /* forced key frames */
+    int64_t *forced_kf_pts;
+    int forced_kf_count;
+    int forced_kf_index;
+    char *forced_keyframes;
+    AVExpr *forced_keyframes_pexpr;
+    double forced_keyframes_expr_const_values[FKF_NB];
+
+    /* audio only */
+    int *audio_channels_map;             /* list of the channels id to pick from the source stream */
+    int audio_channels_mapped;           /* number of channels in audio_channels_map */
+
+    char *logfile_prefix;
+    FILE *logfile;
+
+    OutputFilter *filter;
+    char *avfilter;
+    char *filters;         ///< filtergraph associated to the -filter option
+    char *filters_script;  ///< filtergraph script associated to the -filter_script option
+
+    AVDictionary *encoder_opts;
+    AVDictionary *sws_dict;
+    AVDictionary *swr_opts;
+    AVDictionary *resample_opts;
+    char *apad;
+    OSTFinished finished;        /* no more packets should be written for this stream */
+    int unavailable;                     /* true if the stream is unavailable (possibly temporarily) */
+    int stream_copy;
+    const char *attachment_filename;
+    int copy_initial_nonkeyframes;
+    int copy_prior_start;
+    char *disposition;
+
+    int keep_pix_fmt;
+
+    AVCodecParserContext *parser;
+
+    /* stats */
+    // combined size of all the packets written
+    uint64_t data_size;
+    // number of packets send to the muxer
+    uint64_t packets_written;
+    // number of frames/samples sent to the encoder
+    uint64_t frames_encoded;
+    uint64_t samples_encoded;
+
+    /* packet quality factor */
+    int quality;
+
+    /* packet picture type */
+    int pict_type;
+
+    /* frame encode sum of squared error values */
+    int64_t error[4];
+} OutputStream;
+
+typedef struct OutputFile {
+    AVFormatContext *ctx;
+    AVDictionary *opts;
+    int ost_index;       /* index of the first stream in output_streams */
+    int64_t recording_time;  ///< desired length of the resulting file in microseconds == AV_TIME_BASE units
+    int64_t start_time;      ///< start time in microseconds == AV_TIME_BASE units
+    uint64_t limit_filesize; /* filesize limit expressed in bytes */
+
+    int shortest;
+} OutputFile;
+
+typedef struct SpecifierOpt {
+    char *specifier;    /**< stream/chapter/program/... specifier */
+    union {
+        uint8_t *str;
+        int        i;
+        int64_t  i64;
+        float      f;
+        double   dbl;
+    } u;
+} SpecifierOpt;
+
+typedef struct OptionDef {
+    const char *name;
+    int flags;
+#define HAS_ARG    0x0001
+#define OPT_BOOL   0x0002
+#define OPT_EXPERT 0x0004
+#define OPT_STRING 0x0008
+#define OPT_VIDEO  0x0010
+#define OPT_AUDIO  0x0020
+#define OPT_INT    0x0080
+#define OPT_FLOAT  0x0100
+#define OPT_SUBTITLE 0x0200
+#define OPT_INT64  0x0400
+#define OPT_EXIT   0x0800
+#define OPT_DATA   0x1000
+#define OPT_PERFILE  0x2000     /* the option is per-file (currently ffmpeg-only).
+                                   implied by OPT_OFFSET or OPT_SPEC */
+#define OPT_OFFSET 0x4000       /* option is specified as an offset in a passed optctx */
+#define OPT_SPEC   0x8000       /* option is to be stored in an array of SpecifierOpt.
+                                   Implies OPT_OFFSET. Next element after the offset is
+                                   an int containing element count in the array. */
+#define OPT_TIME  0x10000
+#define OPT_DOUBLE 0x20000
+#define OPT_INPUT  0x40000
+#define OPT_OUTPUT 0x80000
+     union {
+        void *dst_ptr;
+        int (*func_arg)(void *, const char *, const char *);
+        size_t off;
+    } u;
+    const char *help;
+    const char *argname;
+} OptionDef;
+/* select an input stream for an output stream */
+typedef struct StreamMap {
+    int disabled;           /* 1 if this mapping is disabled by a negative map */
+    int file_index;
+    int stream_index;
+    int sync_file_index;
+    int sync_stream_index;
+    char *linklabel;       /* name of an output link, for mapping lavfi outputs */
+} StreamMap;
+
+typedef struct {
+    int  file_idx,  stream_idx,  channel_idx; // input
+    int ofile_idx, ostream_idx;               // output
+} AudioChannelMap;
+
+
+typedef struct InputFilter {
+    AVFilterContext    *filter;
+    struct InputStream *ist;
+    struct FilterGraph *graph;
+    uint8_t            *name;
+} InputFilter;
+
+typedef struct OutputFilter {
+    AVFilterContext     *filter;
+    struct OutputStream *ost;
+    struct FilterGraph  *graph;
+    uint8_t             *name;
+
+    /* temporary storage until stream maps are processed */
+    AVFilterInOut       *out_tmp;
+    enum AVMediaType     type;
+} OutputFilter;
+
+typedef struct FilterGraph {
+    int            index;
+    const char    *graph_desc;
+
+    AVFilterGraph *graph;
+    int reconfiguration;
+
+    InputFilter   **inputs;
+    int          nb_inputs;
+    OutputFilter **outputs;
+    int         nb_outputs;
+} FilterGraph;
+
+typedef struct InputStream {
+    int file_index;
+    AVStream *st;
+    int discard;             /* true if stream data should be discarded */
+    int user_set_discard;
+    int decoding_needed;     /* non zero if the packets must be decoded in 'raw_fifo', see DECODING_FOR_* */
+#define DECODING_FOR_OST    1
+#define DECODING_FOR_FILTER 2
+
+    AVCodecContext *dec_ctx;
+    AVCodec *dec;
+    AVFrame *decoded_frame;
+    AVFrame *filter_frame; /* a ref of decoded_frame, to be sent to filters */
+
+    int64_t       start;     /* time when read started */
+    /* predicted dts of the next packet read for this stream or (when there are
+     * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
+    int64_t       next_dts;
+    int64_t       dts;       ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
+
+    int64_t       next_pts;  ///< synthetic pts for the next decode frame (in AV_TIME_BASE units)
+    int64_t       pts;       ///< current pts of the decoded frame  (in AV_TIME_BASE units)
+    int           wrap_correction_done;
+
+    int64_t filter_in_rescale_delta_last;
+
+    int64_t min_pts; /* pts with the smallest value in a current stream */
+    int64_t max_pts; /* pts with the highest value in a current stream */
+    int64_t nb_samples; /* number of samples in the last decoded audio frame before looping */
+
+    double ts_scale;
+    int saw_first_ts;
+    int showed_multi_packet_warning;
+    AVDictionary *decoder_opts;
+    AVRational framerate;               /* framerate forced with -r */
+    int top_field_first;
+    int guess_layout_max;
+
+    int autorotate;
+    int resample_height;
+    int resample_width;
+    int resample_pix_fmt;
+
+    int      resample_sample_fmt;
+    int      resample_sample_rate;
+    int      resample_channels;
+    uint64_t resample_channel_layout;
+
+    int fix_sub_duration;
+    struct { /* previous decoded subtitle and related variables */
+        int got_output;
+        int ret;
+        AVSubtitle subtitle;
+    } prev_sub;
+
+    struct sub2video {
+        int64_t last_pts;
+        int64_t end_pts;
+        AVFrame *frame;
+        int w, h;
+    } sub2video;
+
+    int dr1;
+
+    /* decoded data from this stream goes into all those filters
+     * currently video and audio only */
+    InputFilter **filters;
+    int        nb_filters;
+
+    int reinit_filters;
+
+    /* hwaccel options */
+    enum HWAccelID hwaccel_id;
+    char  *hwaccel_device;
+
+    /* hwaccel context */
+    enum HWAccelID active_hwaccel_id;
+    void  *hwaccel_ctx;
+    void (*hwaccel_uninit)(AVCodecContext *s);
+    int  (*hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags);
+    int  (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
+    enum AVPixelFormat hwaccel_pix_fmt;
+    enum AVPixelFormat hwaccel_retrieved_pix_fmt;
+
+    /* stats */
+    // combined size of all the packets read
+    uint64_t data_size;
+    /* number of packets successfully read for this stream */
+    uint64_t nb_packets;
+    // number of frames/samples retrieved from the decoder
+    uint64_t frames_decoded;
+    uint64_t samples_decoded;
+} InputStream;
+
+typedef struct InputFile {
+    AVFormatContext *ctx;
+    int eof_reached;      /* true if eof reached */
+    int eagain;           /* true if last read attempt returned EAGAIN */
+    int ist_index;        /* index of first stream in input_streams */
+    int loop;             /* set number of times input stream should be looped */
+    int64_t duration;     /* actual duration of the longest stream in a file
+                             at the moment when looping happens */
+    AVRational time_base; /* time base of the duration */
+    int64_t input_ts_offset;
+
+    int64_t ts_offset;
+    int64_t last_ts;
+    int64_t start_time;   /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
+    int seek_timestamp;
+    int64_t recording_time;
+    int nb_streams;       /* number of stream that ffmpeg is aware of; may be different
+                             from ctx.nb_streams if new streams appear during av_read_frame() */
+    int nb_streams_warn;  /* number of streams that the user was warned of */
+    int rate_emu;
+    int accurate_seek;
+
+#if HAVE_PTHREADS
+    AVThreadMessageQueue *in_thread_queue;
+    pthread_t thread;           /* thread reading from this file */
+    int non_blocking;           /* reading packets from the thread should not block */
+    int joined;                 /* the thread has been joined */
+    int thread_queue_size;      /* maximum number of queued packets */
+#endif
+} InputFile;
+
 class OSD;
 
 QT_BEGIN_NAMESPACE
@@ -40,29 +360,12 @@ protected:
        bool encode_audio;
 
 #if defined(USE_LIBAV)
-       AVFormatContext *output_context;
-       AVOutputFormat *format;
-       AVCodec *audio_codec;
-       AVCodecContext *audio_codec_context;
-       AVCodec *video_codec;
-       AVCodecContext *video_codec_context;
-       
-       AVStream *audio_stream;
-       AVStream *video_stream;
-
-       AVFrame *audio_frame_data;
-       AVFrame *audio_tmp_frame;
-       struct AVCodec codec_real;
-       struct SwrContext *audio_swr_context;
-       int audio_nb_samples;
-       int64_t audio_samples_count;
-       
-       int64_t audio_next_pts;
-       AVDictionary *audio_option;
-       AVDictionary *video_option;
+       OutputStream video_stream;
+       OutputStream audio_stream;
 #endif   
        QString _filename;
        bool bRunThread;
+       bool debug_timestamp;
        
        uint min_rate;
        uint max_rate;
@@ -73,7 +376,6 @@ protected:
        bool recording;
        int rec_fps;
 
-       AVRational time_base;
        
        uint64_t audio_size;
        uint64_t video_size;
@@ -83,15 +385,24 @@ protected:
        uint64_t totalDstFrame;
        uint64_t totalAudioFrame;
 
-       int16_t audio_frame[2 * 48000 * sizeof(int16_t)]; // 1Sec
-       uint32_t video_frame[1280 * 512 * sizeof(uint32_t)]; // 1 frame : right?
-       uint32_t video_dst[1280 * 1024 * sizeof(uint32_t)]; // 1 frame : right?
+       int16_t audio_frame_buf[2 * 48000 * sizeof(int16_t)]; // 1Sec
+       uint32_t video_frame_buf[1280 * 512 * sizeof(uint32_t)]; // 1 frame : right?
+       uint32_t video_dst_buf[1280 * 1024 * sizeof(uint32_t)]; // 1 frame : right?
 
        QQueue<int> video_width_queue;
        QQueue<int> video_height_queue;
        QQueue<QByteArray *> video_data_queue;
        
        QQueue<QByteArray *> audio_data_queue;
+       int64_t audio_remain;
+       int64_t video_remain;
+       uint32_t audio_offset;
+       uint32_t audio_frame_offset;
+       uint32_t video_offset;
+       uint64_t audio_frame_number;
+       uint64_t audio_frame_max;
+       uint64_t video_frame_number;
+       uint64_t video_frame_max;
        
        bool dequeue_audio(int16_t *);
        bool dequeue_video(uint32_t *);
@@ -110,6 +421,7 @@ protected:
        bool audio_resample(void *_frame);
        bool setup_audio_resampler(void);
        void add_stream_audio(void **_codec, int _codec_id);
+       void *get_one_audio_frame(bool *continue_flag);
        
 public:
        MOVIE_SAVER(int width, int height, int fps, OSD *osd);
index fccab08..7598cee 100644 (file)
@@ -9,32 +9,35 @@
 #include "../osd.h"
 #include "agar_logger.h"
 
-
+extern "C" {
+       #include "libavutil/common.h"
+       #include "libavutil/intreadwrite.h"
+}
 void *MOVIE_SAVER::alloc_audio_frame(int  _sample_fmt,
                                                                                uint64_t channel_layout,
                                                                                int sample_rate, int nb_samples)
 {
 #if defined(USE_LIBAV)
        enum AVSampleFormat sample_fmt = (enum AVSampleFormat)_sample_fmt;
-    AVFrame *frame = av_frame_alloc();
-    int ret;
-    if (!frame) {
-        AGAR_DebugLog(AGAR_LOG_DEBUG, "Error allocating an audio frame\n");
-        //exit(1);
+       AVFrame *frame = av_frame_alloc();
+       int ret;
+       if (!frame) {
+               AGAR_DebugLog(AGAR_LOG_DEBUG, "Error allocating an audio frame\n");
+               //exit(1);
                return NULL;
-    }
-    frame->format = sample_fmt;
-    frame->channel_layout = channel_layout;
-    frame->sample_rate = sample_rate;
-    frame->nb_samples = nb_samples;
-    if (nb_samples) {
-        ret = av_frame_get_buffer(frame, 0);
-        if (ret < 0) {
-            AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver: Error allocating an audio buffer");
-            return NULL;
-        }
-    }
-    return (void *)frame;
+       }
+       frame->format = sample_fmt;
+       frame->channel_layout = channel_layout;
+       frame->sample_rate = sample_rate;
+       frame->nb_samples = nb_samples;
+       if (nb_samples) {
+               ret = av_frame_get_buffer(frame, 0);
+               if (ret < 0) {
+                       AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver: Error allocating an audio buffer");
+                       return NULL;
+               }
+       }
+       return (void *)frame;
 #else
        return NULL;
 #endif
@@ -48,12 +51,12 @@ int MOVIE_SAVER::write_audio_frame(const void *_time_base, void *_pkt)
        AVRational *time_base = (AVRational *)_time_base;
        AVPacket *pkt = (AVPacket *)_pkt;
        
-    /* rescale output packet timestamp values from codec to stream timebase */
-    av_packet_rescale_ts(pkt, *time_base, st->time_base);
-    pkt->stream_index = st->index;
-    /* Write the compressed frame to the media file. */
-    //log_packet(fmt_ctx, pkt);
-    return av_interleaved_write_frame(fmt_ctx, pkt);
+       /* rescale output packet timestamp values from codec to stream timebase */
+       av_packet_rescale_ts(pkt, *time_base, st->time_base);
+       pkt->stream_index = st->index;
+       /* Write the compressed frame to the media file. */
+       //log_packet(fmt_ctx, pkt);
+       return av_interleaved_write_frame(fmt_ctx, pkt);
 #else
        return -1;
 #endif
@@ -75,9 +78,9 @@ bool MOVIE_SAVER::audio_resample(void *_frame)
                                                                                  AV_ROUND_UP);
        if(audio_dst_nb_samples == frame->nb_samples) {
                AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver: Error while re-sampling sound:");
-               //ret = av_frame_make_writable(frame);
-               //return true;
-               return false;
+               ret = av_frame_make_writable(frame);
+               return true;
+               //return false;
        }
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
@@ -115,12 +118,12 @@ bool MOVIE_SAVER::setup_audio_resampler(void)
                AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver/Audio: Could not allocate resampler context\n");
                return false;
        }
-       av_opt_set_int       (audio_swr_context, "in_channel_count",   audio_codec_context->channels,       0);
-       av_opt_set_int       (audio_swr_context, "in_sample_rate",     audio_codec_context->sample_rate,    0);
-       av_opt_set_sample_fmt(audio_swr_context, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
-       av_opt_set_int       (audio_swr_context, "out_channel_count",  audio_codec_context->channels,       0);
-       av_opt_set_int       (audio_swr_context, "out_sample_rate",    audio_codec_context->sample_rate,    0);
-       av_opt_set_sample_fmt(audio_swr_context, "out_sample_fmt",     audio_codec_context->sample_fmt,     0);
+       av_opt_set_int       (audio_swr_context, "in_channel_count",   audio_codec_context->channels,    0);
+       av_opt_set_int       (audio_swr_context, "in_sample_rate",     audio_codec_context->sample_rate, 0);
+       av_opt_set_sample_fmt(audio_swr_context, "in_sample_fmt",      AV_SAMPLE_FMT_S16,                0);
+       av_opt_set_int       (audio_swr_context, "out_channel_count",  audio_codec_context->channels,    0);
+       av_opt_set_int       (audio_swr_context, "out_sample_rate",    audio_codec_context->sample_rate, 0);
+       av_opt_set_sample_fmt(audio_swr_context, "out_sample_fmt",     audio_codec_context->sample_fmt,  0);
        /* initialize the resampling context */
        if ((ret = swr_init(audio_swr_context)) < 0) {
                AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver/Audio: Failed to initialize the resampling context\n");
@@ -133,52 +136,575 @@ bool MOVIE_SAVER::setup_audio_resampler(void)
 
 void MOVIE_SAVER::add_stream_audio(void **_codec, int _codec_id)
 {
-    AVCodecContext *c;
+       AVCodecContext *c;
        AVCodec **codec = (AVCodec **)_codec;
        enum AVCodecID codec_id = (enum AVCodecID)_codec_id;
-    int i;
+       int i;
+
+       /* find the encoder */
+       *codec = avcodec_find_encoder(codec_id);
+       if (!(*codec)) {
+               fprintf(stderr, "Could not find encoder for '%s'\n",
+                               avcodec_get_name(codec_id));
+               exit(1);
+       }
+
+       audio_stream = avformat_new_stream(output_context, *codec);
+       if (!audio_stream) {
+               fprintf(stderr, "Could not allocate stream\n");
+               exit(1);
+       }
+       audio_stream->id = output_context->nb_streams-1;
+       c = audio_stream->codec;
+
+       {
+               c->sample_fmt  = (*codec)->sample_fmts ?
+                       (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
+               c->bit_rate     = 64000;
+               c->sample_rate = 44100;
+               if ((*codec)->supported_samplerates) {
+                       c->sample_rate = (*codec)->supported_samplerates[0];
+                       for (i = 0; (*codec)->supported_samplerates[i]; i++) {
+                               if ((*codec)->supported_samplerates[i] == 44100)
+                                       c->sample_rate = 44100;
+                       }
+               }
+               c->channels             = av_get_channel_layout_nb_channels(c->channel_layout);
+               c->channel_layout = AV_CH_LAYOUT_STEREO;
+               if ((*codec)->channel_layouts) {
+                       c->channel_layout = (*codec)->channel_layouts[0];
+                       for (i = 0; (*codec)->channel_layouts[i]; i++) {
+                               if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
+                                       c->channel_layout = AV_CH_LAYOUT_STEREO;
+                       }
+               }
+               c->channels             = av_get_channel_layout_nb_channels(c->channel_layout);
+               audio_stream->time_base = (AVRational){ 1, c->sample_rate };
+       }
+       /* Some formats want stream headers to be separate. */
+       if (output_context->oformat->flags & AVFMT_GLOBALHEADER)
+               c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+}
+
+void *MOVIE_SAVER::get_one_audio_frame(bool *continue_flag)
+{
+       int16_t *optr;
+#if defined(USE_LIBAV)
+       AVFrame *frame;
+       frame = audio_frame_data;
+       if(av_compare_ts(audio_next_pts, audio_stream->codec->time_base,
+                                        10.0, (AVRational){ 1, 1 }) >= 0) {
+               AGAR_DebugLog(AGAR_LOG_DEBUG, "*");
+               *continue_flag = false;
+               //audio_continue = false;
+               return (void *)NULL;
+       }
+       optr = (int16_t *)frame->data[0];
+       for(int j = audio_frame_offset; j < frame->nb_samples; j++) {
+               if(audio_remain <= 0) {
+                       *continue_flag = true;
+                       audio_offset = 0;
+                       return (void *)frame;
+               }
+               if(audio_offset >= audio_size) {
+                       audio_offset = 0;
+                       *continue_flag = true;
+                       return (void *)frame;
+               }
+               for(int k = 0; k < audio_stream->codec->channels; k++) {
+                       //optr[(audio_frame_offset * audio_stream->codec->channels)+ k] = ptr[audio_offset + k];
+               }
+               audio_offset += audio_stream->codec->channels;
+               audio_remain -= audio_stream->codec->channels;
+               audio_frame_offset++;
+       }
+       *continue_flag = false; // GO
+       return (void *)frame;
+#else
+       return NULL;
+#endif
+}
+
+// Got from ffmpeg 3.0.2 .
+// static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+bool MOVIE_SAVER::write_frame(void *_avformatcontext_s, void *_pkt, void *_ost)
+{
+#if defined(USE_LIBAV)
+       OutputStream *ost = (OutputStream *)_ost;
+       AVPacket *pkt = (AVPacket *)_pkt;
+       AVFormatContext *s = (AVFormatContext *)_avformatcontext_s;
+    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
+    AVCodecContext          *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
+    int ret;
+
+       if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
+               ost->st->codec->extradata = (uint8_t *)av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+               if (ost->st->codec->extradata) {
+                       memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
+                       ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
+               }
+       }
+       
+       if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
+               (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
+               pkt->pts = pkt->dts = AV_NOPTS_VALUE;
+       
+       /*
+        * Audio encoders may split the packets --  #frames in != #packets out.
+        * But there is no reordering, so we can limit the number of output packets
+        * by simply dropping them here.
+        * Counting encoded video frames needs to be done separately because of
+        * reordering, see do_video_out()
+        */
+       if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
+               if (ost->frame_number >= ost->max_frames) {
+                       av_packet_unref(pkt);
+                       return false; // Overflow
+               }
+               ost->frame_number++;
+       }
+       if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+               int i;
+               uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
+                                                                                         NULL);
+               ost->quality = sd ? AV_RL32(sd) : -1;
+               ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
+
+               for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
+                       if (sd && i < sd[5])
+                               ost->error[i] = AV_RL64(sd + 8 + 8*i);
+                       else
+                               ost->error[i] = -1;
+               }
+
+               if (ost->frame_rate.num && ost->is_cfr) {
+                       if (pkt->duration > 0)
+                               AGAR_DebugLog(AGAR_LOG_WARN, "Movie/Saver: Overriding packet duration by frame rate, this should not happen\n");
+                       pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
+                                                                                ost->st->time_base);
+               }
+       }
+
+       if (bsfc)
+               av_packet_split_side_data(pkt);
+
+       if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
+               AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver: BitstreamFilters failed", ret);
+               return false;
+               //if (exit_on_error)
+               //      exit_program(1);
+       }
+
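+       /* Keep timestamps sane for the muxer: never let DTS exceed PTS, and keep DTS monotonically increasing per output stream. */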
+       if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
+               if (pkt->dts != AV_NOPTS_VALUE &&
+                       pkt->pts != AV_NOPTS_VALUE &&
+                       pkt->dts > pkt->pts) {
+                       AGAR_DebugLog(AGAR_LOG_WARN, "Movie/Saver: Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
+                                                 pkt->dts, pkt->pts,
+                                                 ost->file_index, ost->st->index);
+                       pkt->pts =
+                       pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
+                               - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
+                               - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
+               }
+               if(
+                (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
+                pkt->dts != AV_NOPTS_VALUE &&
+                ost->last_mux_dts != AV_NOPTS_VALUE) {
+                       int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
+                       if (pkt->dts < max) {
+                               int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AGAR_LOG_WARN : AGAR_LOG_DEBUG;
+                               AGAR_DebugLog(loglevel, "Movie/Saver: Non-monotonous DTS in output stream "
+                                                         "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
+                                                         ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
+                               //if (exit_on_error) {
+                               //      av_log(NULL, AV_LOG_FATAL, "aborting.\n");
+                               //      exit_program(1);
+                               //}
+                               AGAR_DebugLog(loglevel, "Movie/Saver: changing to %"PRId64". This may result "
+                                                         "in incorrect timestamps in the output file.\n",
+                                                         max);
+                               if(pkt->pts >= pkt->dts)
+                                       pkt->pts = FFMAX(pkt->pts, max);
+                               pkt->dts = max;
+                       }
+               }
+       }
+       ost->last_mux_dts = pkt->dts;
+
+       ost->data_size += pkt->size;
+       ost->packets_written++;
+
+       pkt->stream_index = ost->index;
+
+       if (debug_timestamp) {
+               AGAR_DebugLog(AGAR_LOG_INFO, "Movie/Saver: muxer <- type:%s "
+                                         "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
+                                         (const char *)av_get_media_type_string(ost->enc_ctx->codec_type),
+                                         (const char *)av_ts2str(pkt->pts),
+                                         (const char *)av_ts2timestr(pkt->pts, &ost->st->time_base),
+                                         (const char *)av_ts2str(pkt->dts),
+                                         (const char *)av_ts2timestr(pkt->dts, &ost->st->time_base),
+                                         pkt->size
+                       );
+       }
+
+       ret = av_interleaved_write_frame(s, pkt);
+       if (ret < 0) {
+               AGAR_DebugLog(AGAR_LOG_DEBUG, "Movie/Saver: av_interleaved_write_frame()", ret);
+               //main_return_code = 1;
+               //close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
+               do_close();
+       }
+       av_packet_unref(pkt);
+#endif
+       return true; // Bool
+}
+
+
+//static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
+bool MOVIE_SAVER::do_audio_out(void *_s, void *_ost, void *_frame)
+{
+#if defined(USE_LIBAV)
+       AVFormatContext *s = (AVFormatContext *)_s;
+       OutputStream *ost = (OutputStream *)_ost;
+       AVFrame *frame = (AVFrame *)_frame;
+    AVCodecContext *enc = ost->enc_ctx;
+    AVPacket pkt;
+    int got_packet = 0;
+
+    av_init_packet(&pkt);
+    pkt.data = NULL;
+    pkt.size = 0;
+
+    if (!check_recording_time(ost)){
+               //AGAR_DebugLog(AGAR_LOG_INFO, "Movie/Saver: AUDIO SEND: GOT Null packet.");
+        return false;
+       }
+    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
+        frame->pts = ost->sync_opts;
+    ost->sync_opts = frame->pts + frame->nb_samples;
+    ost->samples_encoded += frame->nb_samples;
+    ost->frames_encoded++;
+
+    if(pkt.size || !pkt.data) {
+               AGAR_DebugLog(AGAR_LOG_INFO, "Movie/Saver: AUDIO SEND: GOT Null packet.");
+               do_close();
+               return false;
+       }
+    //update_benchmark(NULL);
+    if (debug_timestamp) {
+               AGAR_DebugLog(AGAR_LOG_INFO, "encoder <- type:audio "
+                                         "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
+                                         av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
+                                         enc->time_base.num, enc->time_base.den);
+    }
 
-    /* find the encoder */
-    *codec = avcodec_find_encoder(codec_id);
-    if (!(*codec)) {
-        fprintf(stderr, "Could not find encoder for '%s'\n",
-                avcodec_get_name(codec_id));
-        exit(1);
+    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
+        AGAR_DebugLog(AGAR_LOG_INFO, "Audio encoding failed (avcodec_encode_audio2)\n");
+               return false;
+        //exit_program(1);
     }
+//update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
+
+    if (got_packet) {
+        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
 
-    audio_stream = avformat_new_stream(output_context, *codec);
-    if (!audio_stream) {
-        fprintf(stderr, "Could not allocate stream\n");
-        exit(1);
+        if (debug_timestamp) {
+            AGAR_DebugLog(AGAR_LOG_INFO, "Movie/Saver: encoder -> type:audio "
+                                                 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+                                                 (const char *)av_ts2str(pkt.pts),
+                                                 (const char *)av_ts2timestr(pkt.pts, &ost->st->time_base),
+                                                 (const char *)av_ts2str(pkt.dts),
+                                                 (const char *)av_ts2timestr(pkt.dts, &ost->st->time_base));
+        }
+
+        write_frame(s, &pkt, ost);
     }
-    audio_stream->id = output_context->nb_streams-1;
-    c = audio_stream->codec;
-
-    {
-        c->sample_fmt  = (*codec)->sample_fmts ?
-            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
-        c->bit_rate    = 64000;
-        c->sample_rate = 44100;
-        if ((*codec)->supported_samplerates) {
-            c->sample_rate = (*codec)->supported_samplerates[0];
-            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
-                if ((*codec)->supported_samplerates[i] == 44100)
-                    c->sample_rate = 44100;
+#endif
+       return true;
+}
+
+
+/**
+ * Get and encode new output from any of the filtergraphs, without causing
+ * activity.
+ *
+ * @return  0 for success, <0 for severe errors
+ */
+int MOVIE_SAVER::reap_filters(int flush)
+{
+    AVFrame *filtered_frame = NULL;
+    int i;
+
+    /* Reap all buffers present in the buffer sinks */
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        OutputFile    *of = output_files[ost->file_index];
+        AVFilterContext *filter;
+        AVCodecContext *enc = ost->enc_ctx;
+        int ret = 0;
+
+        if (!ost->filter)
+            continue;
+        filter = ost->filter->filter;
+
+        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
+            return AVERROR(ENOMEM);
+        }
+        filtered_frame = ost->filtered_frame;
+
+        while (1) {
+            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
+            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
+                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
+            if (ret < 0) {
+                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
+                    av_log(NULL, AV_LOG_WARNING,
+                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
+                } else if (flush && ret == AVERROR_EOF) {
+                    //if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
+                    //    do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
+                }
+                break;
+            }
+            if (ost->finished) {
+                av_frame_unref(filtered_frame);
+                continue;
+            }
+            if (filtered_frame->pts != AV_NOPTS_VALUE) {
+                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
+                AVRational tb = enc->time_base;
+                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
+
+                tb.den <<= extra_bits;
+                float_pts =
+                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
+                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
+                float_pts /= 1 << extra_bits;
+                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
+                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
+
+                filtered_frame->pts =
+                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
+                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
+            }
+            //if (ost->source_index >= 0)
+            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
+
+            switch (filter->inputs[0]->type) {
+            case AVMEDIA_TYPE_VIDEO:
+                if (!ost->frame_aspect_ratio.num)
+                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
+
+                if (debug_timestamp) {
+                    AGAR_DebugLog(AGAR_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
+                                                                 (const char *)av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
+                                                                 float_pts,
+                                                                 enc->time_base.num, enc->time_base.den);
+                }
+
+                //do_video_out(of->ctx, ost, filtered_frame, float_pts);
+                break;
+            case AVMEDIA_TYPE_AUDIO:
+                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
+                    enc->channels != av_frame_get_channels(filtered_frame)) {
+                    AGAR_DebugLog(AGAR_LOG_INFO,
+                                                                 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
+                    break;
+                }
+                do_audio_out(of->ctx, ost, filtered_frame);
+                break;
+            default:
+                // TODO support subtitle filters
+                //av_assert0(0);
+                               return 0;
             }
+            av_frame_unref(filtered_frame);
         }
-        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
-        c->channel_layout = AV_CH_LAYOUT_STEREO;
-        if ((*codec)->channel_layouts) {
-            c->channel_layout = (*codec)->channel_layouts[0];
-            for (i = 0; (*codec)->channel_layouts[i]; i++) {
-                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
-                    c->channel_layout = AV_CH_LAYOUT_STEREO;
+    }
+    return 0;
+}
+
+//void flush_encoders(void)
+void MOVIE_SAVER::flush_encoders(void)
+{
+    int i, ret;
+#if defined(USE_LIBAV)
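+    /* Each encoder is drained by feeding it NULL frames until it stops returning packets; those packets are then muxed. */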
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream   *ost = output_streams[i];
+        AVCodecContext *enc = ost->enc_ctx;
+        AVFormatContext *os = output_files[ost->file_index]->ctx;
+        int stop_encoding = 0;
+
+        if (!ost->encoding_needed)
+            continue;
+
+        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
+            continue;
+#if FF_API_LAVF_FMT_RAWPICTURE
+        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
+            continue;
+#endif
+
+        for (;;) {
+            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
+            const char *desc;
+
+            switch (enc->codec_type) {
+            case AVMEDIA_TYPE_AUDIO:
+                encode = avcodec_encode_audio2;
+                desc   = "audio";
+                break;
+            case AVMEDIA_TYPE_VIDEO:
+                encode = avcodec_encode_video2;
+                desc   = "video";
+                break;
+            default:
+                stop_encoding = 1;
+            }
+
+            if (encode) {
+                AVPacket pkt;
+                int pkt_size;
+                int got_packet;
+                av_init_packet(&pkt);
+                pkt.data = NULL;
+                pkt.size = 0;
+
+                //update_benchmark(NULL);
+                ret = encode(enc, &pkt, NULL, &got_packet);
+                AGAR_DebugLog(AGAR_LOG_DEBUG, "flush_%s %d.%d",
+                                                         (const char *)desc,
+                                                         ost->file_index,
+                                                         ost->index);
+                if (ret < 0) {
+                    AGAR_DebugLog(AGAR_LOG_INFO, "%s encoding failed: %s\n",
+                                                                 desc,
+                                                                 (const char *)av_err2str(ret));
+                    do_close();
+                                       return;
+                }
+                if (ost->logfile && enc->stats_out) {
+                    fprintf(ost->logfile, "%s", enc->stats_out);
+                }
+                if (!got_packet) {
+                    stop_encoding = 1;
+                    break;
+                }
+                if (ost->finished & MUXER_FINISHED) {
+                    av_packet_unref(&pkt);
+                    continue;
+                }
+                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+                pkt_size = pkt.size;
+                write_frame(os, &pkt, ost);
+                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
+                    do_video_stats(ost, pkt_size);
+                }
             }
+
+            if (stop_encoding)
+                break;
         }
-        c->channels        = av_get_channel_layout_nb_channels(c->channel_layout);
-        audio_stream->time_base = (AVRational){ 1, c->sample_rate };
-       }
-    /* Some formats want stream headers to be separate. */
-    if (output_context->oformat->flags & AVFMT_GLOBALHEADER)
-        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+    }
+}
+
+
+/**
+ * Select the output stream to process.
+ *
+ * @return  selected output stream, or NULL if none available
+ */
+void *MOVIE_SAVER::choose_output(void)
+{
+#if defined(USE_LIBAV)
+    int i;
+    int64_t opts_min = INT64_MAX;
+    OutputStream *ost_min = NULL;
+
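+    /* Pick the output stream whose muxed timeline (cur_dts) lags furthest behind, keeping audio and video packets interleaved. */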
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
+                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
+                                    AV_TIME_BASE_Q);
+        if (ost->st->cur_dts == AV_NOPTS_VALUE)
+            AGAR_DebugLog(AGAR_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
+
+        if (!ost->finished && opts < opts_min) {
+            opts_min = opts;
+            ost_min  = ost->unavailable ? NULL : ost;
+        }
+    }
+    return (void *)ost_min;
+#else
+       return (void *)NULL;
+#endif 
+}
+
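+/* Returns non-zero if any output stream was flagged unavailable because its
+   input hit EAGAIN during the last transcode_step(). */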
+int MOVIE_SAVER::got_eagain(void)
+{
+    int i;
+    for (i = 0; i < nb_output_streams; i++)
+        if (output_streams[i]->unavailable)
+            return 1;
+    return 0;
+}
+
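+/* Clears the per-file eagain and per-stream unavailable flags so every stream
+   is considered again on the next pass. */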
+void MOVIE_SAVER::reset_eagain(void)
+{
+    int i;
+    for (i = 0; i < nb_input_files; i++)
+        input_files[i]->eagain = 0;
+    for (i = 0; i < nb_output_streams; i++)
+        output_streams[i]->unavailable = 0;
+}
+
+
+/**
+ * Run a single step of transcoding.
+ *
+ * @return  0 for success, <0 for error
+ */
+int MOVIE_SAVER::transcode_step(void)
+{
+#if defined(USE_LIBAV)
+    OutputStream *ost;
+    InputStream  *ist;
+    int ret;
+
+    ost = (OutputStream *)choose_output();
+    if (!ost) {
+        if (got_eagain()) {
+            reset_eagain();
+            this->usleep(10000);
+            return 0;
+        }
+        AGAR_DebugLog(AGAR_LOG_INFO, "No more inputs to read from, finishing.\n");
+        return AVERROR_EOF;
+    }
+
+    //if (ost->filter) {
+    //    if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
+    //        return ret;
+    //    if (!ist)
+    //        return 0;
+    //} else {
+    //    av_assert0(ost->source_index >= 0);
+    //}
+    /* Keep the source-stream lookup from the ffmpeg original even though the
+       filter branch above is commented out: ist must point at a valid input
+       stream before process_input() dereferences it. */
+    ist = input_streams[ost->source_index];
+
+    ret = process_input(ist->file_index);
+    if (ret == AVERROR(EAGAIN)) {
+        if (input_files[ist->file_index]->eagain)
+            ost->unavailable = 1;
+        return 0;
+    }
+
+    if (ret < 0)
+        return ret == AVERROR_EOF ? 0 : ret;
+
+    return reap_filters(0);
+#else
+	return 0;
+#endif 
 }
+
+
index c23c7ca..252a5b5 100644 (file)
 #include "agar_logger.h"
 
 
+#define GROW_ARRAY(array, nb_elems)\
+    array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)
+
+#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
+{\
+    int i, ret;\
+    for (i = 0; i < o->nb_ ## name; i++) {\
+        char *spec = o->name[i].specifier;\
+        if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
+            outvar = o->name[i].u.type;\
+        else if (ret < 0)\
+            exit_program(1);\
+    }\
+}
+
+#define MATCH_PER_TYPE_OPT(name, type, outvar, fmtctx, mediatype)\
+{\
+    int i;\
+    for (i = 0; i < o->nb_ ## name; i++) {\
+        char *spec = o->name[i].specifier;\
+        if (!strcmp(spec, mediatype))\
+            outvar = o->name[i].u.type;\
+    }\
+}
+
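+/* grow_array() (wrapped by GROW_ARRAY above) appends one zero-initialised slot
+   to a dynamic array, mirroring ffmpeg's cmdutils implementation.  The
+   MATCH_PER_STREAM_OPT / MATCH_PER_TYPE_OPT macros copy the option value whose
+   stream specifier matches the given stream or media type into outvar; both
+   expect a local OptionsContext *o to be in scope. */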
+void *MOVIE_SAVER::grow_array(void *array, int elem_size, int *size, int new_size)
+{
+    if (new_size >= INT_MAX / elem_size) {
+        av_log(NULL, AV_LOG_ERROR, "Array too big.\n");
+        exit_program(1);
+    }
+    if (*size < new_size) {
+        uint8_t *tmp = (uint8_t *)av_realloc_array(array, new_size, elem_size);
+        if (!tmp) {
+            av_log(NULL, AV_LOG_ERROR, "Could not alloc buffer.\n");
+            exit_program(1);
+        }
+        memset(tmp + *size*elem_size, 0, (new_size-*size) * elem_size);
+        *size = new_size;
+        return tmp;
+    }
+    return array;
+}
+
+
+// From ffmpeg 3.0.2
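+/* Creates one AVStream/OutputStream pair: picks the encoder via choose_encoder(),
+   allocates enc_ctx, applies the per-stream options (preset file, -frames,
+   bitstream filters, codec tag, qscale, disposition, sws/swr/resample dicts)
+   and, when source_index >= 0, ties the stream to its input. */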
+OutputStream *MOVIE_SAVER::new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type, int source_index)
+{
+    OutputStream *ost;
+    AVStream *st = avformat_new_stream(oc, NULL);
+    int idx      = oc->nb_streams - 1, ret = 0;
+    char *bsf = NULL, *next, *codec_tag = NULL;
+    AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
+    double qscale = -1;
+    int i;
+
+    if (!st) {
+        av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
+        exit_program(1);
+    }
+       //FIXME
+    if (oc->nb_streams - 1 < o->nb_streamid_map)
+        st->id = o->streamid_map[oc->nb_streams - 1];
+
+    GROW_ARRAY(output_streams, nb_output_streams);
+    if (!(ost = (OutputStream *)av_mallocz(sizeof(*ost))))
+        exit_program(1);
+    output_streams[nb_output_streams - 1] = ost;
+
+    ost->file_index = nb_output_files - 1;
+    ost->index      = idx;
+    ost->st         = st;
+    st->codec->codec_type = type;
+    choose_encoder(o, oc, ost);
+
+    ost->enc_ctx = avcodec_alloc_context3(ost->enc);
+    if (!ost->enc_ctx) {
+        av_log(NULL, AV_LOG_ERROR, "Error allocating the encoding context.\n");
+        exit_program(1);
+    }
+    ost->enc_ctx->codec_type = type;
+
+    if (ost->enc) {
+        AVIOContext *s = NULL;
+        char *buf = NULL, *arg = NULL, *preset = NULL;
+               //FIXME
+        ost->encoder_opts  = filter_codec_opts(o->g->codec_opts, ost->enc->id, oc, st, ost->enc);
+
+        MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
+        if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
+            do  {
+                buf = get_line(s);
+                if (!buf[0] || buf[0] == '#') {
+                    av_free(buf);
+                    continue;
+                }
+                if (!(arg = strchr(buf, '='))) {
+                    av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
+                    exit_program(1);
+                }
+                *arg++ = 0;
+                av_dict_set(&ost->encoder_opts, buf, arg, AV_DICT_DONT_OVERWRITE);
+                av_free(buf);
+            } while (!s->eof_reached);
+            avio_closep(&s);
+        }
+        if (ret) {
+            av_log(NULL, AV_LOG_FATAL,
+                   "Preset %s specified for stream %d:%d, but could not be opened.\n",
+                   preset, ost->file_index, ost->index);
+            exit_program(1);
+        }
+    } else {
+               //FIXME
+        ost->encoder_opts = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st, NULL);
+    }
+
+    ost->max_frames = INT64_MAX;
+       //FIXME
+    MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
+    for (i = 0; i<o->nb_max_frames; i++) {
+        char *p = o->max_frames[i].specifier;
+        if (!*p && type != AVMEDIA_TYPE_VIDEO) {
+            av_log(NULL, AV_LOG_WARNING, "Applying unspecific -frames to non video streams, maybe you meant -vframes ?\n");
+            break;
+        }
+    }
+
+    ost->copy_prior_start = -1;
+    MATCH_PER_STREAM_OPT(copy_prior_start, i, ost->copy_prior_start, oc ,st);
+
+    MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
+    while (bsf) {
+        char *arg = NULL;
+        if ((next = strchr(bsf, ',')))
+            *next++ = 0;
+        if ((arg = strchr(bsf, '=')))
+            *arg++ = 0;
+        if (!(bsfc = av_bitstream_filter_init(bsf))) {
+            av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
+            exit_program(1);
+        }
+        if (bsfc_prev)
+            bsfc_prev->next = bsfc;
+        else
+            ost->bitstream_filters = bsfc;
+        if (arg)
+            if (!(bsfc->args = av_strdup(arg))) {
+                av_log(NULL, AV_LOG_FATAL, "Bitstream filter memory allocation failed\n");
+                exit_program(1);
+            }
+
+        bsfc_prev = bsfc;
+        bsf       = next;
+    }
+
+    MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
+    if (codec_tag) {
+        uint32_t tag = strtol(codec_tag, &next, 0);
+        if (*next)
+            tag = AV_RL32(codec_tag);
+        ost->st->codec->codec_tag =
+        ost->enc_ctx->codec_tag = tag;
+    }
+
+    MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
+    if (qscale >= 0) {
+        ost->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
+        ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
+    }
+
+    MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
+    ost->disposition = av_strdup(ost->disposition);
+
+    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
+        ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+       //FIXME
+    av_dict_copy(&ost->sws_dict, o->g->sws_dict, 0);
+       //FIXME
+    av_dict_copy(&ost->swr_opts, o->g->swr_opts, 0);
+    if (ost->enc && av_get_exact_bits_per_sample(ost->enc->id) == 24)
+        av_dict_set(&ost->swr_opts, "output_sample_bits", "24", 0);
+       //FIXME
+    av_dict_copy(&ost->resample_opts, o->g->resample_opts, 0);
+
+    ost->source_index = source_index;
+    if (source_index >= 0) {
+        ost->sync_ist = input_streams[source_index];
+        input_streams[source_index]->discard = 0;
+        input_streams[source_index]->st->discard = input_streams[source_index]->user_set_discard;
+    }
+    ost->last_mux_dts = AV_NOPTS_VALUE;
+
+    return ost;
+}
+
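+/* Builds one OutputFile: allocates the muxer context, creates output streams
+   either from explicit -map entries or by auto-selecting the "best" inputs
+   (largest video, most audio channels, first matching subtitle/data stream),
+   handles attachments, opens the output AVIOContext, then copies metadata,
+   chapters and manually specified programs/metadata into the muxer context. */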
+static int open_output_file(OptionsContext *o, const char *filename)
+{
+    AVFormatContext *oc;
+    int i, j, err;
+    AVOutputFormat *file_oformat;
+    OutputFile *of;
+    OutputStream *ost;
+    InputStream  *ist;
+    AVDictionary *unused_opts = NULL;
+    AVDictionaryEntry *e = NULL;
+
+       //FIXME
+    if (o->stop_time != INT64_MAX && o->recording_time != INT64_MAX) {
+        o->stop_time = INT64_MAX;
+        av_log(NULL, AV_LOG_WARNING, "-t and -to cannot be used together; using -t.\n");
+    }
+       //FIXME
+    if (o->stop_time != INT64_MAX && o->recording_time == INT64_MAX) {
+        int64_t start_time = o->start_time == AV_NOPTS_VALUE ? 0 : o->start_time;
+        if (o->stop_time <= start_time) {
+            av_log(NULL, AV_LOG_ERROR, "-to value smaller than -ss; aborting.\n");
+            exit_program(1);
+        } else {
+            o->recording_time = o->stop_time - start_time;
+        }
+    }
+
+    GROW_ARRAY(output_files, nb_output_files);
+    of = av_mallocz(sizeof(*of));
+    if (!of)
+        exit_program(1);
+    output_files[nb_output_files - 1] = of;
+       //FIXME
+    of->ost_index      = nb_output_streams;
+    of->recording_time = o->recording_time;
+    of->start_time     = o->start_time;
+    of->limit_filesize = o->limit_filesize;
+    of->shortest       = o->shortest;
+    av_dict_copy(&of->opts, o->g->format_opts, 0);
+
+    if (!strcmp(filename, "-"))
+        filename = "pipe:";
+       //FIXME
+    err = avformat_alloc_output_context2(&oc, NULL, o->format, filename);
+    if (!oc) {
+        print_error(filename, err);
+        exit_program(1);
+    }
+
+    of->ctx = oc;
+       //FIXME
+    if (o->recording_time != INT64_MAX)
+        oc->duration = o->recording_time;
+
+    file_oformat= oc->oformat;
+    oc->interrupt_callback = int_cb;
+
+    /* create streams for all unlabeled output pads */
+    for (i = 0; i < nb_filtergraphs; i++) {
+        FilterGraph *fg = filtergraphs[i];
+        for (j = 0; j < fg->nb_outputs; j++) {
+            OutputFilter *ofilter = fg->outputs[j];
+
+            if (!ofilter->out_tmp || ofilter->out_tmp->name)
+                continue;
+                       //FIXME
+            switch (ofilter->type) {
+            case AVMEDIA_TYPE_VIDEO:    o->video_disable    = 1; break;
+            case AVMEDIA_TYPE_AUDIO:    o->audio_disable    = 1; break;
+            case AVMEDIA_TYPE_SUBTITLE: o->subtitle_disable = 1; break;
+            }
+            init_output_filter(ofilter, o, oc);
+        }
+    }
+
+    /* ffserver seeking with date=... needs a date reference */
+    if (!strcmp(file_oformat->name, "ffm") &&
+        av_strstart(filename, "http:", NULL)) {
+        int err = parse_option(o, "metadata", "creation_time=now", options);
+        if (err < 0) {
+            print_error(filename, err);
+            exit_program(1);
+        }
+    }
+
+    if (!strcmp(file_oformat->name, "ffm") && !override_ffserver &&
+        av_strstart(filename, "http:", NULL)) {
+        int j;
+        /* special case for files sent to ffserver: we get the stream
+           parameters from ffserver */
+        int err = read_ffserver_streams(o, oc, filename);
+        if (err < 0) {
+            print_error(filename, err);
+            exit_program(1);
+        }
+        for(j = nb_output_streams - oc->nb_streams; j < nb_output_streams; j++) {
+            ost = output_streams[j];
+            for (i = 0; i < nb_input_streams; i++) {
+                ist = input_streams[i];
+                if(ist->st->codec->codec_type == ost->st->codec->codec_type){
+                    ost->sync_ist= ist;
+                    ost->source_index= i;
+                    if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
+                    if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
+                    ist->discard = 0;
+                    ist->st->discard = ist->user_set_discard;
+                    break;
+                }
+            }
+            if(!ost->sync_ist){
+                av_log(NULL, AV_LOG_FATAL, "Missing %s stream which is required by this ffm\n", av_get_media_type_string(ost->st->codec->codec_type));
+                exit_program(1);
+            }
+        }
+               //FIXME
+    } else if (!o->nb_stream_maps) {
+        char *subtitle_codec_name = NULL;
+        /* pick the "best" stream of each type */
+
+        /* video: highest resolution */
+        if (!o->video_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
+            int area = 0, idx = -1;
+            int qcr = avformat_query_codec(oc->oformat, oc->oformat->video_codec, 0);
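+            /* Score = pixel area plus a large bias (100000000) for streams that
+               produced at least one frame during probing; attached pictures are
+               demoted to area 1 unless the muxer's video codec is an attached
+               picture (tag 'APIC').  The audio pick below applies the same bias
+               to the channel count. */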
+            for (i = 0; i < nb_input_streams; i++) {
+                int new_area;
+                ist = input_streams[i];
+                new_area = ist->st->codec->width * ist->st->codec->height + 100000000*!!ist->st->codec_info_nb_frames;
+                if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
+                    new_area = 1;
+                if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+                    new_area > area) {
+                    if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
+                        continue;
+                    area = new_area;
+                    idx = i;
+                }
+            }
+            if (idx >= 0)
+                new_video_stream(o, oc, idx);
+        }
+
+        /* audio: most channels */
+               //FIXME
+        if (!o->audio_disable && av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
+            int best_score = 0, idx = -1;
+            for (i = 0; i < nb_input_streams; i++) {
+                int score;
+                ist = input_streams[i];
+                score = ist->st->codec->channels + 100000000*!!ist->st->codec_info_nb_frames;
+                if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
+                    score > best_score) {
+                    best_score = score;
+                    idx = i;
+                }
+            }
+            if (idx >= 0)
+                new_audio_stream(o, oc, idx);
+        }
+
+        /* subtitles: pick first */
+        MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, oc, "s");
+               //FIXME
+        if (!o->subtitle_disable && (avcodec_find_encoder(oc->oformat->subtitle_codec) || subtitle_codec_name)) {
+            for (i = 0; i < nb_input_streams; i++)
+                if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+                    AVCodecDescriptor const *input_descriptor =
+                        avcodec_descriptor_get(input_streams[i]->st->codec->codec_id);
+                    AVCodecDescriptor const *output_descriptor = NULL;
+                    AVCodec const *output_codec =
+                        avcodec_find_encoder(oc->oformat->subtitle_codec);
+                    int input_props = 0, output_props = 0;
+                    if (output_codec)
+                        output_descriptor = avcodec_descriptor_get(output_codec->id);
+                    if (input_descriptor)
+                        input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
+                    if (output_descriptor)
+                        output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
+                    if (subtitle_codec_name ||
+                        input_props & output_props ||
+                        // Map dvb teletext which has neither property to any output subtitle encoder
+                        input_descriptor && output_descriptor &&
+                        (!input_descriptor->props ||
+                         !output_descriptor->props)) {
+                        new_subtitle_stream(o, oc, i);
+                        break;
+                    }
+                }
+        }
+        /* Data only if codec id match */
+               //FIXME
+        if (!o->data_disable ) {
+            enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
+            for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
+                if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_DATA
+                    && input_streams[i]->st->codec->codec_id == codec_id )
+                    new_data_stream(o, oc, i);
+            }
+        }
+    } else {
+               //FIXME
+        for (i = 0; i < o->nb_stream_maps; i++) {
+            StreamMap *map = &o->stream_maps[i];
+
+            if (map->disabled)
+                continue;
+
+            if (map->linklabel) {
+                FilterGraph *fg;
+                OutputFilter *ofilter = NULL;
+                int j, k;
+
+                for (j = 0; j < nb_filtergraphs; j++) {
+                    fg = filtergraphs[j];
+                    for (k = 0; k < fg->nb_outputs; k++) {
+                        AVFilterInOut *out = fg->outputs[k]->out_tmp;
+                        if (out && !strcmp(out->name, map->linklabel)) {
+                            ofilter = fg->outputs[k];
+                            goto loop_end;
+                        }
+                    }
+                }
+loop_end:
+                if (!ofilter) {
+                    av_log(NULL, AV_LOG_FATAL, "Output with label '%s' does not exist "
+                           "in any defined filter graph, or was already used elsewhere.\n", map->linklabel);
+                    exit_program(1);
+                }
+                init_output_filter(ofilter, o, oc);
+            } else {
+                int src_idx = input_files[map->file_index]->ist_index + map->stream_index;
+
+                ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
+                               //FIXME
+                if(o->subtitle_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
+                    continue;
+                if(o->   audio_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+                    continue;
+                if(o->   video_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+                    continue;
+                if(o->    data_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_DATA)
+                    continue;
+
+                ost = NULL;
+                switch (ist->st->codec->codec_type) {
+                case AVMEDIA_TYPE_VIDEO:      ost = new_video_stream     (o, oc, src_idx); break;
+                case AVMEDIA_TYPE_AUDIO:      ost = new_audio_stream     (o, oc, src_idx); break;
+                case AVMEDIA_TYPE_SUBTITLE:   ost = new_subtitle_stream  (o, oc, src_idx); break;
+                case AVMEDIA_TYPE_DATA:       ost = new_data_stream      (o, oc, src_idx); break;
+                case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc, src_idx); break;
+                case AVMEDIA_TYPE_UNKNOWN:
+                    if (copy_unknown_streams) {
+                        ost = new_unknown_stream   (o, oc, src_idx);
+                        break;
+                    }
+                default:
+                    av_log(NULL, ignore_unknown_streams ? AV_LOG_WARNING : AV_LOG_FATAL,
+                           "Cannot map stream #%d:%d - unsupported type.\n",
+                           map->file_index, map->stream_index);
+                    if (!ignore_unknown_streams) {
+                        av_log(NULL, AV_LOG_FATAL,
+                               "If you want unsupported types ignored instead "
+                               "of failing, please use the -ignore_unknown option\n"
+                               "If you want them copied, please use -copy_unknown\n");
+                        exit_program(1);
+                    }
+                }
+                if (ost)
+                    ost->sync_ist = input_streams[  input_files[map->sync_file_index]->ist_index
+                                                  + map->sync_stream_index];
+            }
+        }
+    }
+
+    /* handle attached files */
+       //FIXME
+    for (i = 0; i < o->nb_attachments; i++) {
+        AVIOContext *pb;
+        uint8_t *attachment;
+        const char *p;
+        int64_t len;
+
+        if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
+            av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
+                   o->attachments[i]);
+            exit_program(1);
+        }
+        if ((len = avio_size(pb)) <= 0) {
+            av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
+                   o->attachments[i]);
+            exit_program(1);
+        }
+        if (!(attachment = av_malloc(len))) {
+            av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
+                   o->attachments[i]);
+            exit_program(1);
+        }
+        avio_read(pb, attachment, len);
+
+        ost = new_attachment_stream(o, oc, -1);
+        ost->stream_copy               = 1;
+        ost->attachment_filename       = o->attachments[i];
+        ost->finished                  = 1;
+        ost->st->codec->extradata      = attachment;
+        ost->st->codec->extradata_size = len;
+
+        p = strrchr(o->attachments[i], '/');
+        av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
+        avio_closep(&pb);
+    }
+
+    for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
+        AVDictionaryEntry *e;
+        ost = output_streams[i];
+
+        if ((ost->stream_copy || ost->attachment_filename)
+            && (e = av_dict_get(o->g->codec_opts, "flags", NULL, AV_DICT_IGNORE_SUFFIX))
+            && (!e->key[5] || check_stream_specifier(oc, ost->st, e->key+6)))
+            if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
+                exit_program(1);
+    }
+
+    if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
+        av_dump_format(oc, nb_output_files - 1, oc->filename, 1);
+        av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", nb_output_files - 1);
+        exit_program(1);
+    }
+
+    /* check if all codec options have been used */
+       //FIXME
+    unused_opts = strip_specifiers(o->g->codec_opts);
+    for (i = of->ost_index; i < nb_output_streams; i++) {
+        e = NULL;
+        while ((e = av_dict_get(output_streams[i]->encoder_opts, "", e,
+                                AV_DICT_IGNORE_SUFFIX)))
+            av_dict_set(&unused_opts, e->key, NULL, 0);
+    }
+
+    e = NULL;
+    while ((e = av_dict_get(unused_opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+        const AVClass *class = avcodec_get_class();
+        const AVOption *option = av_opt_find(&class, e->key, NULL, 0,
+                                             AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
+        const AVClass *fclass = avformat_get_class();
+        const AVOption *foption = av_opt_find(&fclass, e->key, NULL, 0,
+                                              AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
+        if (!option || foption)
+            continue;
+
+
+        if (!(option->flags & AV_OPT_FLAG_ENCODING_PARAM)) {
+            av_log(NULL, AV_LOG_ERROR, "Codec AVOption %s (%s) specified for "
+                   "output file #%d (%s) is not an encoding option.\n", e->key,
+                   option->help ? option->help : "", nb_output_files - 1,
+                   filename);
+            exit_program(1);
+        }
+
+        // gop_timecode is injected by generic code but not always used
+        if (!strcmp(e->key, "gop_timecode"))
+            continue;
+
+        av_log(NULL, AV_LOG_WARNING, "Codec AVOption %s (%s) specified for "
+               "output file #%d (%s) has not been used for any stream. The most "
+               "likely reason is either wrong type (e.g. a video option with "
+               "no video streams) or that it is a private option of some encoder "
+               "which was not actually used for any stream.\n", e->key,
+               option->help ? option->help : "", nb_output_files - 1, filename);
+    }
+    av_dict_free(&unused_opts);
+
+    /* set the encoding/decoding_needed flags */
+    for (i = of->ost_index; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+
+        ost->encoding_needed = !ost->stream_copy;
+        if (ost->encoding_needed && ost->source_index >= 0) {
+            InputStream *ist = input_streams[ost->source_index];
+            ist->decoding_needed |= DECODING_FOR_OST;
+        }
+    }
+
+    /* check filename in case of an image number is expected */
+    if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
+        if (!av_filename_number_test(oc->filename)) {
+            print_error(oc->filename, AVERROR(EINVAL));
+            exit_program(1);
+        }
+    }
+
+    if (!(oc->oformat->flags & AVFMT_NOSTREAMS) && !input_stream_potentially_available) {
+        av_log(NULL, AV_LOG_ERROR,
+               "No input streams but output needs an input stream\n");
+        exit_program(1);
+    }
+
+    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
+        /* test if it already exists to avoid losing precious files */
+        assert_file_overwrite(filename);
+
+        /* open the file */
+        if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
+                              &oc->interrupt_callback,
+                              &of->opts)) < 0) {
+            print_error(filename, err);
+            exit_program(1);
+        }
+    } else if (strcmp(oc->oformat->name, "image2")==0 && !av_filename_number_test(filename))
+        assert_file_overwrite(filename);
+       //FIXME
+    if (o->mux_preload) {
+        av_dict_set_int(&of->opts, "preload", o->mux_preload*AV_TIME_BASE, 0);
+    }
+    oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
+
+    /* copy metadata */
+    for (i = 0; i < o->nb_metadata_map; i++) {
+        char *p;
+        int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
+
+        if (in_file_index >= nb_input_files) {
+            av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
+            exit_program(1);
+        }
+        copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc,
+                      in_file_index >= 0 ?
+                      input_files[in_file_index]->ctx : NULL, o);
+    }
+
+    /* copy chapters */
+       //FIXME
+    if (o->chapters_input_file >= nb_input_files) {
+        if (o->chapters_input_file == INT_MAX) {
+            /* copy chapters from the first input file that has them*/
+            o->chapters_input_file = -1;
+            for (i = 0; i < nb_input_files; i++)
+                if (input_files[i]->ctx->nb_chapters) {
+                    o->chapters_input_file = i;
+                    break;
+                }
+        } else {
+            av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
+                   o->chapters_input_file);
+            exit_program(1);
+        }
+    }
+    if (o->chapters_input_file >= 0)
+        copy_chapters(input_files[o->chapters_input_file], of,
+                      !o->metadata_chapters_manual);
+
+    /* copy global metadata by default */
+    if (!o->metadata_global_manual && nb_input_files){
+        av_dict_copy(&oc->metadata, input_files[0]->ctx->metadata,
+                     AV_DICT_DONT_OVERWRITE);
+        if(o->recording_time != INT64_MAX)
+            av_dict_set(&oc->metadata, "duration", NULL, 0);
+        av_dict_set(&oc->metadata, "creation_time", NULL, 0);
+    }
+    if (!o->metadata_streams_manual)
+        for (i = of->ost_index; i < nb_output_streams; i++) {
+            InputStream *ist;
+            if (output_streams[i]->source_index < 0)         /* this is true e.g. for attached files */
+                continue;
+            ist = input_streams[output_streams[i]->source_index];
+            av_dict_copy(&output_streams[i]->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
+            if (!output_streams[i]->stream_copy) {
+                av_dict_set(&output_streams[i]->st->metadata, "encoder", NULL, 0);
+                if (ist->autorotate)
+                    av_dict_set(&output_streams[i]->st->metadata, "rotate", NULL, 0);
+            }
+        }
+
+    /* process manually set programs */
+    for (i = 0; i < o->nb_program; i++) {
+        const char *p = o->program[i].u.str;
+        int progid = i+1;
+        AVProgram *program;
+
+        while(*p) {
+            const char *p2 = av_get_token(&p, ":");
+            const char *to_dealloc = p2;
+            char *key;
+            if (!p2)
+                break;
+
+            if(*p) p++;
+
+            key = av_get_token(&p2, "=");
+            if (!key || !*p2) {
+                av_freep(&to_dealloc);
+                av_freep(&key);
+                break;
+            }
+            p2++;
+
+            if (!strcmp(key, "program_num"))
+                progid = strtol(p2, NULL, 0);
+            av_freep(&to_dealloc);
+            av_freep(&key);
+        }
+
+        program = av_new_program(oc, progid);
+
+        p = o->program[i].u.str;
+        while(*p) {
+            const char *p2 = av_get_token(&p, ":");
+            const char *to_dealloc = p2;
+            char *key;
+            if (!p2)
+                break;
+            if(*p) p++;
+
+            key = av_get_token(&p2, "=");
+            if (!key) {
+                av_log(NULL, AV_LOG_FATAL,
+                       "No '=' character in program string %s.\n",
+                       p2);
+                exit_program(1);
+            }
+            if (!*p2)
+                exit_program(1);
+            p2++;
+
+            if (!strcmp(key, "title")) {
+                av_dict_set(&program->metadata, "title", p2, 0);
+            } else if (!strcmp(key, "program_num")) {
+            } else if (!strcmp(key, "st")) {
+                int st_num = strtol(p2, NULL, 0);
+                av_program_add_stream_index(oc, progid, st_num);
+            } else {
+                av_log(NULL, AV_LOG_FATAL, "Unknown program key %s.\n", key);
+                exit_program(1);
+            }
+            av_freep(&to_dealloc);
+            av_freep(&key);
+        }
+    }
+
+    /* process manually set metadata */
+       //FIXME
+    for (i = 0; i < o->nb_metadata; i++) {
+        AVDictionary **m;
+        char type, *val;
+        const char *stream_spec;
+        int index = 0, j, ret = 0;
+        char now_time[256];
+
+        val = strchr(o->metadata[i].u.str, '=');
+        if (!val) {
+            av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
+                   o->metadata[i].u.str);
+            exit_program(1);
+        }
+        *val++ = 0;
+
+        if (!strcmp(o->metadata[i].u.str, "creation_time") &&
+            !strcmp(val, "now")) {
+            time_t now = time(0);
+            struct tm *ptm, tmbuf;
+            ptm = localtime_r(&now, &tmbuf);
+            if (ptm) {
+                if (strftime(now_time, sizeof(now_time), "%Y-%m-%d %H:%M:%S", ptm))
+                    val = now_time;
+            }
+        }
+
+        parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
+        if (type == 's') {
+            for (j = 0; j < oc->nb_streams; j++) {
+                ost = output_streams[nb_output_streams - oc->nb_streams + j];
+                if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
+                    av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
+                    if (!strcmp(o->metadata[i].u.str, "rotate")) {
+                        ost->rotate_overridden = 1;
+                    }
+                } else if (ret < 0)
+                    exit_program(1);
+            }
+        }
+        else {
+            switch (type) {
+            case 'g':
+                m = &oc->metadata;
+                break;
+            case 'c':
+                if (index < 0 || index >= oc->nb_chapters) {
+                    av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
+                    exit_program(1);
+                }
+                m = &oc->chapters[index]->metadata;
+                break;
+            case 'p':
+                if (index < 0 || index >= oc->nb_programs) {
+                    av_log(NULL, AV_LOG_FATAL, "Invalid program index %d in metadata specifier.\n", index);
+                    exit_program(1);
+                }
+                m = &oc->programs[index]->metadata;
+                break;
+            default:
+                av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
+                exit_program(1);
+            }
+            av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
+        }
+    }
+
+    return 0;
+}
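+
+/* Option-context plumbing from ffmpeg's ffmpeg_opt.c / cmdutils.c:
+   init_options() seeds the defaults, uninit_options() frees every OPT_SPEC /
+   OPT_STRING field plus the map arrays, and open_files() parses one option
+   group per file and hands it to the supplied open_file callback. */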
+static void uninit_options(OptionsContext *o)
+{
+    const OptionDef *po = options;
+    int i;
+
+    /* all OPT_SPEC and OPT_STRING can be freed in generic way */
+    while (po->name) {
+        void *dst = (uint8_t*)o + po->u.off;
+
+        if (po->flags & OPT_SPEC) {
+            SpecifierOpt **so = dst;
+            int i, *count = (int*)(so + 1);
+            for (i = 0; i < *count; i++) {
+                av_freep(&(*so)[i].specifier);
+                if (po->flags & OPT_STRING)
+                    av_freep(&(*so)[i].u.str);
+            }
+            av_freep(so);
+            *count = 0;
+        } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
+            av_freep(dst);
+        po++;
+    }
+
+    for (i = 0; i < o->nb_stream_maps; i++)
+        av_freep(&o->stream_maps[i].linklabel);
+    av_freep(&o->stream_maps);
+    av_freep(&o->audio_channel_maps);
+    av_freep(&o->streamid_map);
+    av_freep(&o->attachments);
+}
+
+static void init_options(OptionsContext *o)
+{
+    memset(o, 0, sizeof(*o));
+
+    o->stop_time = INT64_MAX;
+    o->mux_max_delay  = 0.7;
+    o->start_time     = AV_NOPTS_VALUE;
+    o->start_time_eof = AV_NOPTS_VALUE;
+    o->recording_time = INT64_MAX;
+    o->limit_filesize = UINT64_MAX;
+    o->chapters_input_file = INT_MAX;
+    o->accurate_seek  = 1;
+}
+
+static int open_files(OptionGroupList *l, const char *inout,
+                      int (*open_file)(OptionsContext*, const char*))
+{
+    int i, ret;
+
+    for (i = 0; i < l->nb_groups; i++) {
+        OptionGroup *g = &l->groups[i];
+        OptionsContext o;
+
+        init_options(&o);
+        o.g = g;
+
+        ret = parse_optgroup(&o, g);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_ERROR, "Error parsing options for %s file "
+                   "%s.\n", inout, g->arg);
+            return ret;
+        }
+
+        av_log(NULL, AV_LOG_DEBUG, "Opening an %s file: %s.\n", inout, g->arg);
+        ret = open_file(&o, g->arg);
+        uninit_options(&o);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_ERROR, "Error opening %s file %s.\n",
+                   inout, g->arg);
+            return ret;
+        }
+        av_log(NULL, AV_LOG_DEBUG, "Successfully opened the file.\n");
+    }
+
+    return 0;
+}
+
+
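+/* Video wrapper around new_output_stream(): parses -r and -aspect, and for
+   encoded (non stream-copy) outputs applies frame size, pixel format,
+   intra/inter matrices, rc_override entries, two-pass log files and forced
+   key frames before building the filter chain with get_ost_filters(). */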
+static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
+{
+    AVStream *st;
+    OutputStream *ost;
+    AVCodecContext *video_enc;
+    char *frame_rate = NULL, *frame_aspect_ratio = NULL;
+
+    ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO, source_index);
+    st  = ost->st;
+    video_enc = ost->enc_ctx;
+
+    MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
+    if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
+        av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
+        exit_program(1);
+    }
+    if (frame_rate && video_sync_method == VSYNC_PASSTHROUGH)
+        av_log(NULL, AV_LOG_ERROR, "Using -vsync 0 and -r can produce invalid output files\n");
+
+    MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
+    if (frame_aspect_ratio) {
+        AVRational q;
+        if (av_parse_ratio(&q, frame_aspect_ratio, 255, 0, NULL) < 0 ||
+            q.num <= 0 || q.den <= 0) {
+            av_log(NULL, AV_LOG_FATAL, "Invalid aspect ratio: %s\n", frame_aspect_ratio);
+            exit_program(1);
+        }
+        ost->frame_aspect_ratio = q;
+    }
+
+    MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
+    MATCH_PER_STREAM_OPT(filters,        str, ost->filters,        oc, st);
+
+    if (!ost->stream_copy) {
+        const char *p = NULL;
+        char *frame_size = NULL;
+        char *frame_pix_fmt = NULL;
+        char *intra_matrix = NULL, *inter_matrix = NULL;
+        char *chroma_intra_matrix = NULL;
+        int do_pass = 0;
+        int i;
+
+        MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
+        if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
+            av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
+            exit_program(1);
+        }
+
+        video_enc->bits_per_raw_sample = frame_bits_per_raw_sample;
+        MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
+        if (frame_pix_fmt && *frame_pix_fmt == '+') {
+            ost->keep_pix_fmt = 1;
+            if (!*++frame_pix_fmt)
+                frame_pix_fmt = NULL;
+        }
+        if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == AV_PIX_FMT_NONE) {
+            av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
+            exit_program(1);
+        }
+        st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
+
+        if (intra_only)
+            video_enc->gop_size = 0;
+        MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
+        if (intra_matrix) {
+            if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
+                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
+                exit_program(1);
+            }
+            parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
+        }
+        MATCH_PER_STREAM_OPT(chroma_intra_matrices, str, chroma_intra_matrix, oc, st);
+        if (chroma_intra_matrix) {
+            uint16_t *p = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64);
+            if (!p) {
+                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
+                exit_program(1);
+            }
+            av_codec_set_chroma_intra_matrix(video_enc, p);
+            parse_matrix_coeffs(p, chroma_intra_matrix);
+        }
+        MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
+        if (inter_matrix) {
+            if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
+                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
+                exit_program(1);
+            }
+            parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
+        }
+
+        MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
+        for (i = 0; p; i++) {
+            int start, end, q;
+            int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
+            if (e != 3) {
+                av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
+                exit_program(1);
+            }
+            video_enc->rc_override =
+                av_realloc_array(video_enc->rc_override,
+                                 i + 1, sizeof(RcOverride));
+            if (!video_enc->rc_override) {
+                av_log(NULL, AV_LOG_FATAL, "Could not (re)allocate memory for rc_override.\n");
+                exit_program(1);
+            }
+            video_enc->rc_override[i].start_frame = start;
+            video_enc->rc_override[i].end_frame   = end;
+            if (q > 0) {
+                video_enc->rc_override[i].qscale         = q;
+                video_enc->rc_override[i].quality_factor = 1.0;
+            }
+            else {
+                video_enc->rc_override[i].qscale         = 0;
+                video_enc->rc_override[i].quality_factor = -q/100.0;
+            }
+            p = strchr(p, '/');
+            if (p) p++;
+        }
+        video_enc->rc_override_count = i;
+
+        if (do_psnr)
+            video_enc->flags|= AV_CODEC_FLAG_PSNR;
+
+        /* two pass mode */
+        MATCH_PER_STREAM_OPT(pass, i, do_pass, oc, st);
+        if (do_pass) {
+            if (do_pass & 1) {
+                video_enc->flags |= AV_CODEC_FLAG_PASS1;
+                av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND);
+            }
+            if (do_pass & 2) {
+                video_enc->flags |= AV_CODEC_FLAG_PASS2;
+                av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND);
+            }
+        }
+
+        MATCH_PER_STREAM_OPT(passlogfiles, str, ost->logfile_prefix, oc, st);
+        if (ost->logfile_prefix &&
+            !(ost->logfile_prefix = av_strdup(ost->logfile_prefix)))
+            exit_program(1);
+
+        if (do_pass) {
+            char logfilename[1024];
+            FILE *f;
+
+            snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
+                     ost->logfile_prefix ? ost->logfile_prefix :
+                                           DEFAULT_PASS_LOGFILENAME_PREFIX,
+                     i);
+            if (!strcmp(ost->enc->name, "libx264")) {
+                av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
+            } else {
+                if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
+                    char  *logbuffer = read_file(logfilename);
+
+                    if (!logbuffer) {
+                        av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
+                               logfilename);
+                        exit_program(1);
+                    }
+                    video_enc->stats_in = logbuffer;
+                }
+                if (video_enc->flags & AV_CODEC_FLAG_PASS1) {
+                    f = av_fopen_utf8(logfilename, "wb");
+                    if (!f) {
+                        av_log(NULL, AV_LOG_FATAL,
+                               "Cannot write log file '%s' for pass-1 encoding: %s\n",
+                               logfilename, strerror(errno));
+                        exit_program(1);
+                    }
+                    ost->logfile = f;
+                }
+            }
+        }
+
+        MATCH_PER_STREAM_OPT(forced_key_frames, str, ost->forced_keyframes, oc, st);
+        if (ost->forced_keyframes)
+            ost->forced_keyframes = av_strdup(ost->forced_keyframes);
+
+        MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);
+
+        ost->top_field_first = -1;
+        MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);
+
+
+        ost->avfilter = get_ost_filters(o, oc, ost);
+        if (!ost->avfilter)
+            exit_program(1);
+    } else {
+        MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc ,st);
+    }
+
+    if (ost->stream_copy)
+        check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_VIDEO);
+
+    return ost;
+}
+
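+/* Audio counterpart of new_video_stream(): applies channel count, sample rate,
+   sample format and apad for encoded outputs, and records any -map_channel
+   entries into ost->audio_channels_map. */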
+static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc, int source_index)
+{
+    int n;
+    AVStream *st;
+    OutputStream *ost;
+    AVCodecContext *audio_enc;
+
+    ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO, source_index);
+    st  = ost->st;
+
+    audio_enc = ost->enc_ctx;
+    audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
+
+    MATCH_PER_STREAM_OPT(filter_scripts, str, ost->filters_script, oc, st);
+    MATCH_PER_STREAM_OPT(filters,        str, ost->filters,        oc, st);
+
+    if (!ost->stream_copy) {
+        char *sample_fmt = NULL;
+
+        MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
+
+        MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
+        if (sample_fmt &&
+            (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
+            av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
+            exit_program(1);
+        }
+
+        MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
+
+        MATCH_PER_STREAM_OPT(apad, str, ost->apad, oc, st);
+        ost->apad = av_strdup(ost->apad);
+
+        ost->avfilter = get_ost_filters(o, oc, ost);
+        if (!ost->avfilter)
+            exit_program(1);
+
+        /* check for channel mapping for this audio stream */
+               //FIXME
+        for (n = 0; n < o->nb_audio_channel_maps; n++) {
+            AudioChannelMap *map = &o->audio_channel_maps[n];
+            if ((map->ofile_idx   == -1 || ost->file_index == map->ofile_idx) &&
+                (map->ostream_idx == -1 || ost->st->index  == map->ostream_idx)) {
+                InputStream *ist;
+
+                if (map->channel_idx == -1) {
+                    ist = NULL;
+                } else if (ost->source_index < 0) {
+                    av_log(NULL, AV_LOG_FATAL, "Cannot determine input stream for channel mapping %d.%d\n",
+                           ost->file_index, ost->st->index);
+                    continue;
+                } else {
+                    ist = input_streams[ost->source_index];
+                }
+
+                if (!ist || (ist->file_index == map->file_idx && ist->st->index == map->stream_idx)) {
+                    if (av_reallocp_array(&ost->audio_channels_map,
+                                          ost->audio_channels_mapped + 1,
+                                          sizeof(*ost->audio_channels_map)
+                                          ) < 0 )
+                        exit_program(1);
+
+                    ost->audio_channels_map[ost->audio_channels_mapped++] = map->channel_idx;
+                }
+            }
+        }
+    }
+
+    if (ost->stream_copy)
+        check_streamcopy_filters(o, oc, ost, AVMEDIA_TYPE_AUDIO);
+
+    return ost;
+}
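+
+/* Generic option parsing from ffmpeg's cmdutils.c: parse_options() walks argv
+   and dispatches each "-opt value" pair to parse_option(), while
+   parse_optgroup() applies one already-split OptionGroup and rejects options
+   whose flags do not match the group (input option on an output file etc.). */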
+void parse_options(void *optctx, int argc, char **argv, const OptionDef *options,
+                   void (*parse_arg_function)(void *, const char*))
+{
+    const char *opt;
+    int optindex, handleoptions = 1, ret;
+
+    /* perform system-dependent conversions for arguments list */
+    prepare_app_arguments(&argc, &argv);
+
+    /* parse options */
+    optindex = 1;
+    while (optindex < argc) {
+        opt = argv[optindex++];
+
+        if (handleoptions && opt[0] == '-' && opt[1] != '\0') {
+            if (opt[1] == '-' && opt[2] == '\0') {
+                handleoptions = 0;
+                continue;
+            }
+            opt++;
+
+            if ((ret = parse_option(optctx, opt, argv[optindex], options)) < 0)
+                exit_program(1);
+            optindex += ret;
+        } else {
+            if (parse_arg_function)
+                parse_arg_function(optctx, opt);
+        }
+    }
+}
+
+int parse_optgroup(void *optctx, OptionGroup *g)
+{
+    int i, ret;
+
+    av_log(NULL, AV_LOG_DEBUG, "Parsing a group of options: %s %s.\n",
+           g->group_def->name, g->arg);
+
+    for (i = 0; i < g->nb_opts; i++) {
+        Option *o = &g->opts[i];
+
+        if (g->group_def->flags &&
+                       // FIXME
+            !(g->group_def->flags & o->opt->flags)) {
+            av_log(NULL, AV_LOG_ERROR, "Option %s (%s) cannot be applied to "
+                   "%s %s -- you are trying to apply an input option to an "
+                   "output file or vice versa. Move this option before the "
+                   "file it belongs to.\n", o->key, o->opt->help,
+                   g->group_def->name, g->arg);
+            return AVERROR(EINVAL);
+        }
+               //FIXME
+        av_log(NULL, AV_LOG_DEBUG, "Applying option %s (%s) with argument %s.\n",
+               o->key, o->opt->help, o->val);
+               //FIXME
+        ret = write_option(optctx, o->opt, o->key, o->val);
+        if (ret < 0)
+            return ret;
+    }
+
+    av_log(NULL, AV_LOG_DEBUG, "Successfully parsed a group of options.\n");
+
+    return 0;
+}
+
+// From ffmpeg 3.0.2
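+/* Derives the encoder parameters for every output stream from its filtergraph
+   output (sample rate/format/channels and time base for audio; frame rate,
+   size, aspect ratio, pixel format and time base for video) and prepares
+   forced-keyframe expressions.  In this port only a single audio output stream
+   is configured; the filtergraph-to-stream mapping and frame-rate emulation
+   from ffmpeg.c are left commented out. */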
+int MOVIE_SAVER::transcode_init(void)
+{
+    int ret = 0, i, j, k;
+    AVFormatContext *oc;
+    OutputStream *ost;
+    InputStream *ist;
+    char error[1024] = {0};
+    int want_sdp = 1;
+
+//    for (i = 0; i < nb_filtergraphs; i++) {
+//        FilterGraph *fg = filtergraphs[i];
+//        for (j = 0; j < fg->nb_outputs; j++) {
+//            OutputFilter *ofilter = fg->outputs[j];
+//            if (!ofilter->ost || ofilter->ost->source_index >= 0)
+//                continue;
+//            if (fg->nb_inputs != 1)
+//                continue;
+//            for (k = nb_input_streams-1; k >= 0 ; k--)
+//                if (fg->inputs[0]->ist == input_streams[k])
+//                    break;
+//            ofilter->ost->source_index = k;
+//        }
+//    }
+
+    /* init framerate emulation */
+//    for (i = 0; i < nb_input_files; i++) {
+//        InputFile *ifile = input_files[i];
+//        if (ifile->rate_emu)
+//            for (j = 0; j < ifile->nb_streams; j++)
+//                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
+//    }
+
+       //nb_output_streams = 2; // Audio and Video
+       nb_output_streams = 1; // Audio only
+    /* for each output stream, we compute the right encoding parameters */
+    for (i = 0; i < nb_output_streams; i++) {
+        AVCodecContext *enc_ctx;
+        AVCodecContext *dec_ctx = NULL;
+        ost = output_streams[i];
+        oc  = output_files[ost->file_index]->ctx;
+        //ist = get_input_stream(ost);
+               ist = NULL;
+        if (ost->attachment_filename)
+            continue;
+
+        enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
+
+        if (ist) {
+            dec_ctx = ist->dec_ctx;
+
+            ost->st->disposition          = ist->st->disposition;
+            enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
+            enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
+        } else {
+            for (j = 0; j < oc->nb_streams; j++) {
+                AVStream *st = oc->streams[j];
+                if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
+                    break;
+            }
+            if (j == oc->nb_streams)
+                if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
+                    ost->st->disposition = AV_DISPOSITION_DEFAULT;
+               }
+               
+               {
+            if (!ost->enc)
+                ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
+            if (!ost->enc) {
+                /* should only happen when a default codec is not present. */
+                snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
+                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
+                ret = AVERROR(EINVAL);
+                goto dump_format;
+            }
+
+            set_encoder_id(output_files[ost->file_index], ost);
+
+//#if CONFIG_LIBMFX
+//            if (qsv_transcode_init(ost))
+//                exit_program(1);
+//#endif
+                       if (!ost->filter &&
+                               (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+                 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
+                               FilterGraph *fg;
+                               fg = init_simple_filtergraph(ist, ost);
+                               if (configure_filtergraph(fg)) {
+                                       av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
+                                       exit_program(1);
+                               }
+            }
+
+            if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+                if (!ost->frame_rate.num)
+                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
+                //if (ist && !ost->frame_rate.num)
+                //    ost->frame_rate = ist->framerate;
+                //if (ist && !ost->frame_rate.num)
+                //    ost->frame_rate = ist->st->r_frame_rate;
+                if (!ost->frame_rate.num) {
+                    ost->frame_rate = (AVRational){rec_fps, 1};
+                    av_log(NULL, AV_LOG_WARNING,
+                           "No information "
+                           "about the input framerate is available. Falling "
+                           "back to a default value of %d fps for output stream #%d:%d. Use the -r option "
+                           "if you want a different framerate.\n",
+                           rec_fps, ost->file_index, ost->index);
+                }
+                //ost->frame_rate = (AVRational){25, 1};
+                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
+                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
+                    ost->frame_rate = ost->enc->supported_framerates[idx];
+                }
+                // reduce frame rate for mpeg4 to be within the spec limits
+                if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
+                    av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
+                              ost->frame_rate.num, ost->frame_rate.den, 65535);
+                }
+            }
+
+            switch (enc_ctx->codec_type) {
+            case AVMEDIA_TYPE_AUDIO:
+                enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
+                enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
+                enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
+                enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
+                enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
+                break;
+            case AVMEDIA_TYPE_VIDEO:
+                enc_ctx->time_base = av_inv_q(ost->frame_rate);
+                if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
+                    enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
+                if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
+                   && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
+                    av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
+                                               "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
+                }
+                for (j = 0; j < ost->forced_kf_count; j++)
+                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
+                                                         AV_TIME_BASE_Q,
+                                                         enc_ctx->time_base);
+
+                enc_ctx->width  = ost->filter->filter->inputs[0]->w;
+                enc_ctx->height = ost->filter->filter->inputs[0]->h;
+                enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
+                    ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
+                    av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
+                    ost->filter->filter->inputs[0]->sample_aspect_ratio;
+                if (!strncmp(ost->enc->name, "libx264", 7) &&
+                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+                    av_log(NULL, AV_LOG_WARNING,
+                           "No pixel format specified, %s for H.264 encoding chosen.\n"
+                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+                if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
+                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+                    av_log(NULL, AV_LOG_WARNING,
+                           "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
+                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+                enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
+
+                ost->st->avg_frame_rate = ost->frame_rate;
+
+                if (!dec_ctx ||
+                    enc_ctx->width   != dec_ctx->width  ||
+                    enc_ctx->height  != dec_ctx->height ||
+                    enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
+                    enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
+                }
+
+                if (ost->forced_keyframes) {
+                    if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
+                        ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
+                                            forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
+                        if (ret < 0) {
+                            av_log(NULL, AV_LOG_ERROR,
+                                   "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
+                            return ret;
+                        }
+                        ost->forced_keyframes_expr_const_values[FKF_N] = 0;
+                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
+                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
+                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
+
+                        // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
+                        // parse it only for static kf timings
+                    } else if(strncmp(ost->forced_keyframes, "source", 6)) {
+                        parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
+                    }
+                }
+                break;
+//            case AVMEDIA_TYPE_SUBTITLE:
+//                enc_ctx->time_base = (AVRational){1, 1000};
+//                if (!enc_ctx->width) {
+//                    enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
+//                    enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
+//                }
+//                break;
+//            case AVMEDIA_TYPE_DATA:
+//                break;
+//            default:
+//                abort();
+//                break;
+            }
+        }
+
+        if (ost->disposition) {
+            static const AVOption opts[] = {
+                { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+                { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
+                { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
+                { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
+                { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
+                { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
+                { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
+                { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
+                { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
+                { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
+                { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
+                { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
+                { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
+                { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
+                { NULL },
+            };
+            static const AVClass avclass = {   /* renamed from 'class', a reserved keyword in C++ */
+                .class_name = "",
+                .item_name  = av_default_item_name,
+                .option     = opts,
+                .version    = LIBAVUTIL_VERSION_INT,
+            };
+            const AVClass *pclass = &avclass;
+
+            ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
+            if (ret < 0)
+                goto dump_format;
+        }
+    }
+
+    /* open each encoder */
+    for (i = 0; i < nb_output_streams; i++) {
+        ret = init_output_stream(output_streams[i], error, sizeof(error));
+        if (ret < 0)
+            goto dump_format;
+    }
+
+    /* open files and write file headers */
+    for (i = 0; i < nb_output_files; i++) {
+        oc = output_files[i]->ctx;
+        oc->interrupt_callback = int_cb;
+        if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
+            snprintf(error, sizeof(error),
+                     "Could not write header for output file #%d "
+                     "(incorrect codec parameters ?): %s",
+                     i, av_err2str(ret));
+            ret = AVERROR(EINVAL);
+            goto dump_format;
+        }
+//         assert_avoptions(output_files[i]->opts);
+        if (strcmp(oc->oformat->name, "rtp")) {
+            want_sdp = 0;
+        }
+    }
+
+dump_format:
+    /* dump the file output parameters - cannot be done before in case
+       of stream copy */
+    for (i = 0; i < nb_output_files; i++) {
+        av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
+    }
+
+    for (i = 0; i < nb_output_streams; i++) {
+        ost = output_streams[i];
+
+        if (ost->attachment_filename) {
+            /* an attached file */
+            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
+                   ost->attachment_filename, ost->file_index, ost->index);
+            continue;
+        }
+
+        if (ost->filter && ost->filter->graph->graph_desc) {
+            /* output from a complex graph */
+            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
+            if (nb_filtergraphs > 1)
+                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
+
+            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
+                   ost->index, ost->enc ? ost->enc->name : "?");
+            continue;
+        }
+
+        //av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
+        //       input_streams[ost->source_index]->file_index,
+        //       input_streams[ost->source_index]->st->index,
+        //       ost->file_index,
+        //       ost->index);
+        if (ost->stream_copy)
+            av_log(NULL, AV_LOG_INFO, " (copy)");
+        else {
+            //const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
+            const AVCodec *in_codec    = NULL;
+            const AVCodec *out_codec   = ost->enc;
+            const char *decoder_name   = "?";
+            const char *in_codec_name  = "?";
+            const char *encoder_name   = "?";
+            const char *out_codec_name = "?";
+            const AVCodecDescriptor *desc;
+
+            if (in_codec) {
+                decoder_name  = in_codec->name;
+                desc = avcodec_descriptor_get(in_codec->id);
+                if (desc)
+                    in_codec_name = desc->name;
+                if (!strcmp(decoder_name, in_codec_name))
+                    decoder_name = "native";
+            }
+
+            if (out_codec) {
+                encoder_name   = out_codec->name;
+                desc = avcodec_descriptor_get(out_codec->id);
+                if (desc)
+                    out_codec_name = desc->name;
+                if (!strcmp(encoder_name, out_codec_name))
+                    encoder_name = "native";
+            }
+
+            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
+                   in_codec_name, decoder_name,
+                   out_codec_name, encoder_name);
+        }
+        av_log(NULL, AV_LOG_INFO, "\n");
+    }
+
+    if (ret) {
+        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
+        return ret;
+    }
+
+    //if (sdp_filename || want_sdp) {
+    //    print_sdp();
+    //}
+
+    transcode_init_done = true;
+
+    return 0;
+}
+
+
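The frame-rate selection added above follows a fixed precedence: the rate reported by the filter sink, then the recording fps as a default, then a snap to the encoder's supported_framerates, with an extra reduction for MPEG-4's 16-bit limits. A minimal stand-alone sketch of that precedence, assuming rec_fps comes from the caller and enc is the selected encoder (the helper name is illustrative and not part of the patch):

    #include <libavutil/rational.h>
    #include <libavcodec/avcodec.h>

    static AVRational pick_frame_rate(AVRational sink_rate, const AVCodec *enc, int rec_fps)
    {
        AVRational r = sink_rate;
        if (!r.num)                        /* nothing reported by the filter graph */
            r = (AVRational){rec_fps, 1};  /* fall back to the recording fps */
        if (enc && enc->supported_framerates) {
            /* snap to the nearest rate the encoder actually supports */
            int idx = av_find_nearest_q_idx(r, enc->supported_framerates);
            r = enc->supported_framerates[idx];
        }
        if (enc && enc->id == AV_CODEC_ID_MPEG4)
            /* MPEG-4 stores the rate in 16 bits; reduce to stay within the spec */
            av_reduce(&r.num, &r.den, r.num, r.den, 65535);
        return r;
    }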
 #if defined(USE_MOVIE_SAVER) && defined(USE_LIBAV)
 static const AVRational time_base_15 = (AVRational){1001, 14485};
 static const AVRational time_base_24 = (AVRational){1001, 23976};
@@ -32,8 +1603,12 @@ bool MOVIE_SAVER::setup_context(QString filename, int fps)
        
        avformat_alloc_output_context2(&output_context, NULL, NULL, filename.toLocal8Bit().constData());
        if(output_context == NULL) {
-               AGAR_DebugLog(AGAR_LOG_DEBUG, "AVC ERROR: Failed to get output_context");
-               return false;
+               AGAR_DebugLog(AGAR_LOG_DEBUG, "AVC WARNING: Failed to get output_context; trying to fall back to the MP4 muxer");
+               avformat_alloc_output_context2(&output_context, NULL, "mp4", filename.toLocal8Bit().constData());
+               if(output_context == NULL) {
+                       AGAR_DebugLog(AGAR_LOG_DEBUG, "AVC ERROR: Failed to get output_context even with the MP4 fallback.");
+                       return false;
+               }
        }
        
        output_context->oformat = format;
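The change above makes setup_context() retry with an explicit "mp4" muxer when libavformat cannot guess the container from the file name. A minimal sketch of the same fallback, using only avformat_alloc_output_context2(); the helper name is illustrative:

    #include <libavformat/avformat.h>

    static AVFormatContext *alloc_output_ctx_with_fallback(const char *filename)
    {
        AVFormatContext *ctx = NULL;
        /* NULL format name: let libavformat guess the muxer from the extension */
        avformat_alloc_output_context2(&ctx, NULL, NULL, filename);
        if (!ctx)
            /* unknown or missing extension: force the MP4 muxer */
            avformat_alloc_output_context2(&ctx, NULL, "mp4", filename);
        return ctx;  /* may still be NULL if even the MP4 muxer is unavailable */
    }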
@@ -71,9 +1646,9 @@ bool MOVIE_SAVER::setup_audio_codec(void *_opt)
         nb_samples = c->frame_size;
 
     audio_frame_data     = (AVFrame *)alloc_audio_frame(c->sample_fmt, c->channel_layout,
-                                                                                        c->sample_rate, nb_samples);
-    audio_tmp_frame = (AVFrame *)alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
-                                       c->sample_rate, nb_samples);
+                                                                                                               c->sample_rate, nb_samples);
+//    audio_tmp_frame = (AVFrame *)alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
+//                                                                                                c->sample_rate, nb_samples);
 #endif
     /* create resampler context */
        return true;
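For reference, an audio-frame allocator in the spirit of the alloc_audio_frame() call kept above; this is only a sketch under assumed parameter names, the real helper lives elsewhere in the saver:

    #include <stdint.h>
    #include <libavutil/frame.h>
    #include <libavutil/samplefmt.h>

    static AVFrame *alloc_audio_frame_sketch(enum AVSampleFormat fmt, uint64_t ch_layout,
                                             int sample_rate, int nb_samples)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;
        frame->format         = fmt;
        frame->channel_layout = ch_layout;
        frame->sample_rate    = sample_rate;
        frame->nb_samples     = nb_samples;
        /* allocate the sample buffers for the requested layout and count */
        if (nb_samples && av_frame_get_buffer(frame, 0) < 0) {
            av_frame_free(&frame);
            return NULL;
        }
        return frame;
    }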
@@ -186,58 +1761,7 @@ void MOVIE_SAVER::do_open(QString filename, int fps)
        audio_option = NULL;
        video_option = NULL;    
        do_set_record_fps(fps);
-
        _filename = filename;
-       if(!setup_context(filename, fps)) {
-               do_close();
-               return;
-       }
-#if defined(USE_LIBAV)
-    //if (format->video_codec != AV_CODEC_ID_NONE) {
-       //  add_stream_video(&video_stream, oc, &video_codec, format->video_codec);
-    //    have_video = true;
-       //   encode_video = true;
-    //}
-    if (format->audio_codec != AV_CODEC_ID_NONE) {
-        //add_stream_audio((void **)(&audio_codec->codec), (int)(format->audio_codec));
-        add_stream_audio((void **)(&audio_codec), (int)AV_CODEC_ID_AAC);
-        have_audio = true;
-        encode_audio = true;
-    }
-//    if (have_video)
-//        open_video(oc, video_codec, &video_st, opt);
-//    if (have_audio)
-//        open_audio(oc, audio_codec, &audio_st, opt);
-
-       audio_codec_context = audio_stream->codec;
-       if(!setup_audio_codec(&audio_option)) {
-               do_close();
-               return;
-       }
-       if(!setup_audio_resampler()) {
-               do_close();
-               return;
-       }
-       output_context->bit_rate = 100080 * 1000;
-       output_context->audio_preload = AV_TIME_BASE / 10;
-       output_context->max_delay = 100 * 1000; // MAX 100ms delay;
-       
-       if(avio_open(&(output_context->pb), filename.toLocal8Bit().constData(), AVIO_FLAG_WRITE) < 0) {
-               AGAR_DebugLog(AGAR_LOG_DEBUG, "AVC ERROR: Failed to open file");
-               do_close();
-               return;
-       }               
-
-       av_dump_format(output_context, 0, filename.toLocal8Bit().constData(), 1);
-       //av_dump_format(output_context, 1, NULL, 1);
-       AGAR_DebugLog(AGAR_LOG_DEBUG, "MOVIE/Saver: Successfully opened AVC stream.");
-    /* Write the stream header, if any. */
-    ret = avformat_write_header(output_context, &audio_option);
-    if (ret < 0) {
-        AGAR_DebugLog(AGAR_LOG_DEBUG, "MOVIE/Saver: Error occurred when opening output header\n");
-        return;
-    }
 
-#endif // defined(USE_LIBAV)
        recording = true;
 }
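The block removed from do_open() above (avio_open(), av_dump_format(), avformat_write_header()) is the usual muxer open sequence; it presumably moves to another code path in this commit. A minimal, self-contained sketch of that sequence, with a hypothetical helper name:

    #include <libavformat/avformat.h>

    static int open_and_write_header(AVFormatContext *out, const char *path, AVDictionary **opts)
    {
        int ret;
        if (!(out->oformat->flags & AVFMT_NOFILE)) {
            /* open the output file unless the muxer does its own I/O */
            ret = avio_open(&out->pb, path, AVIO_FLAG_WRITE);
            if (ret < 0)
                return ret;
        }
        av_dump_format(out, 0, path, 1);         /* log the negotiated stream parameters */
        return avformat_write_header(out, opts); /* write the container header */
    }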