Merge remote-tracking branch 'qatar/master'
author    Michael Niedermayer <michaelni@gmx.at>
          Wed, 16 May 2012 00:27:31 +0000 (02:27 +0200)
committer Michael Niedermayer <michaelni@gmx.at>
          Wed, 16 May 2012 00:27:31 +0000 (02:27 +0200)
* qatar/master: (26 commits)
  fate: use diff -b in oneline comparison
  Add missing version bumps and APIchanges/Changelog entries.
  lavfi: move buffer management function to a separate file.
  lavfi: move formats-related functions from default.c to formats.c
  lavfi: move video-related functions to a separate file.
  fate: make smjpeg a demux test
  fate: separate sierra-vmd audio and video tests
  fate: separate smacker audio and video tests
  libmp3lame: set supported channel layouts.
  avconv: automatically insert asyncts when -async is used.
  avconv: add support for audio filters.
  lavfi: add asyncts filter.
  lavfi: add aformat filter
  lavfi: add an audio buffer sink.
  lavfi: add an audio buffer source.
  buffersrc: add av_buffersrc_write_frame().
  buffersrc: fix invalid read in uninit if the fifo hasn't been allocated
  lavfi: rename vsrc_buffer.c to buffersrc.c
  avfiltergraph: reindent
  lavfi: add channel layout/sample rate negotiation.
  ...

Conflicts:
Changelog
doc/APIchanges
doc/filters.texi
ffmpeg.c
ffprobe.c
libavcodec/libmp3lame.c
libavfilter/Makefile
libavfilter/af_aformat.c
libavfilter/allfilters.c
libavfilter/avfilter.c
libavfilter/avfilter.h
libavfilter/avfiltergraph.c
libavfilter/buffersrc.c
libavfilter/defaults.c
libavfilter/formats.c
libavfilter/src_buffer.c
libavfilter/version.h
libavfilter/vf_yadif.c
libavfilter/vsrc_buffer.c
libavfilter/vsrc_buffer.h
libavutil/avutil.h
tests/fate/audio.mak
tests/fate/demux.mak
tests/fate/video.mak

Merged-by: Michael Niedermayer <michaelni@gmx.at>
43 files changed:
Changelog
doc/APIchanges
doc/ffmpeg.texi
doc/filters.texi
ffprobe.c
libavcodec/libmp3lame.c
libavfilter/Makefile
libavfilter/af_aconvert.c
libavfilter/af_aformat.c
libavfilter/af_amerge.c
libavfilter/af_aresample.c
libavfilter/af_astreamsync.c
libavfilter/af_earwax.c
libavfilter/af_pan.c
libavfilter/af_silencedetect.c
libavfilter/af_volume.c
libavfilter/allfilters.c
libavfilter/asrc_aevalsrc.c
libavfilter/avcodec.c
libavfilter/avfilter.c
libavfilter/avfilter.h
libavfilter/avfiltergraph.c
libavfilter/buffer.c
libavfilter/buffersink.c
libavfilter/buffersink.h
libavfilter/buffersrc.c
libavfilter/buffersrc.h
libavfilter/defaults.c
libavfilter/formats.c
libavfilter/formats.h
libavfilter/internal.h
libavfilter/sink_buffer.c
libavfilter/src_buffer.c
libavfilter/src_movie.c
libavfilter/version.h
libavfilter/vf_yadif.c
libavfilter/video.c
libavutil/avutil.h
tests/fate-run.sh
tests/fate/audio.mak
tests/fate/demux.mak
tests/fate/video.mak
tools/lavfi-showfiltfmts.c

diff --cc Changelog
+++ b/Changelog
@@@ -1,83 -1,46 +1,84 @@@
  Entries are sorted chronologically from oldest to youngest within each release,
  releases are sorted from youngest to oldest.
  
 -version <next>:
 -
 -- XWD encoder and decoder
 -- Support for fragmentation in the mov/mp4 muxer
 -- ISMV (Smooth Streaming) muxer
 +version next:
 +- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
 +- setfield filter
  - CDXL demuxer and decoder
  - Apple ProRes encoder
 +- ffprobe -count_packets and -count_frames options
  - Sun Rasterfile Encoder
 -- remove libpostproc
  - ID3v2 attached pictures reading and writing
  - WMA Lossless decoder
 -- XBM encoder
 +- bluray protocol
 +- blackdetect filter
 +- libutvideo encoder wrapper (--enable-libutvideo)
 +- swapuv filter
 +- bbox filter
 +- XBM encoder and decoder
  - RealAudio Lossless decoder
  - ZeroCodec decoder
 -- drop support for avconv without libavfilter
 -- add libavresample audio conversion library
 +- tile video filter
 +- Metal Gear Solid: The Twin Snakes demuxer
 +- OpenEXR image decoder
 +- removelogo filter
 +- drop support for ffmpeg without libavfilter
 +- drawtext video filter: fontconfig support
 +- ffmpeg -benchmark_all option
 +- super2xsai filter ported from libmpcodecs
 +- add libavresample audio conversion library for compatibility
 +- MicroDVD decoder
 +- Avid Meridien (AVUI) decoder
 +- accept + prefix to -pix_fmt option to disable automatic conversions.
+ - audio filters support in libavfilter and avconv
  
  
 -version 0.8:
 -
 +version 0.10:
 +- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
 +         CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
 +         CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,
 +         CVE-2011-3950, CVE-2011-3951, CVE-2011-3952
 +- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
 +- SBaGen (SBG) binaural beats script demuxer
 +- OpenMG Audio muxer
 +- Timecode extraction in DV and MOV
 +- thumbnail video filter
 +- XML output in ffprobe
 +- asplit audio filter
 +- tinterlace video filter
 +- astreamsync audio filter
 +- amerge audio filter
 +- ISMV (Smooth Streaming) muxer
  - GSM audio parser
  - SMJPEG muxer
 -
 -
 -version 0.8_beta2:
 -
 +- XWD encoder and decoder
  - Automatic thread count based on detection of the number of (available) CPU cores
 -- Deprecate libpostproc. If desired, the switch --enable-postproc will
 -  enable it but it may be removed in a later Libav release.
 +- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
 +- ffprobe -show_error option
 +- Avid 1:1 10-bit RGB Packer codec
 +- v308 Quicktime Uncompressed 4:4:4 encoder and decoder
 +- yuv4 libquicktime packed 4:2:0 encoder and decoder
 +- ffprobe -show_frames option
 +- silencedetect audio filter
 +- ffprobe -show_program_version, -show_library_versions, -show_versions options
  - rv34: frame-level multi-threading
  - optimized iMDCT transform on x86 using SSE for mpegaudiodec
 +- Improved PGS subtitle decoder
 +- dumpgraph option to lavfi device
 +- r210 and r10k encoders
 +- ffwavesynth decoder
 +- aviocat tool
 +- ffeval tool
  
  
 -version 0.8_beta1:
 +version 0.9:
  
 +- openal input device added
 +- boxblur filter added
  - BWF muxer
  - Flash Screen Video 2 decoder
 -- ffplay/ffprobe/ffserver renamed to avplay/avprobe/avserver
 -- ffmpeg deprecated, added avconv, which is almost the same for now, except
 +- lavfi input device added
 +- added avconv, which is almost the same for now, except
  for a few incompatible changes in the options, which will hopefully make them
  easier to use. The changes are:
      * The options placement is now strictly enforced! While in theory the
diff --cc doc/APIchanges
@@@ -15,19 -13,21 +15,33 @@@ libavutil:     2011-04-1
  
  API changes, most recent first:
  
- 2012-xx-xx - xxxxxxx - lavc 54.13.1
 +2012-05-07 - xxxxxxx - lavf 54.5.100
 +  Add av_guess_sample_aspect_ratio() function.
 +
 +2012-04-20 - xxxxxxx - lavfi 2.70.100
 +  Add avfilter_unref_bufferp() to avfilter.h.
 +
 +2012-04-12 - xxxxxxx - lavfi 2.68.100
 +  Install libavfilter/asrc_abuffer.h public header.
 +
 +2012-03-26 - a67d9cf - lavfi 2.66.100
 +  Add avfilter_fill_frame_from_{audio_,}buffer_ref() functions.
 +
+ 2012-05-15 - lavfi 2.17.0
+   Add support for audio filters
+   ac71230/a2cd9be - add video/audio buffer sink in a new installed
+                     header buffersink.h
+   720c6b7 - add av_buffersrc_write_frame(), deprecate
+             av_vsrc_buffer_add_frame()
+   ab16504 - add avfilter_copy_buf_props()
+   9453c9e - add extended_data to AVFilterBuffer
+   1b8c927 - add avfilter_get_audio_buffer_ref_from_arrays()
+ 2012-05-09 - lavu 51.30.0 - samplefmt.h
+   142e740 - add av_samples_copy()
+   6d7f617 - add av_samples_set_silence()
+ 2012-05-09 - a5117a2 - lavc 54.13.1
    For audio formats with fixed frame size, the last frame
    no longer needs to be padded with silence, libavcodec
    will handle this internally (effectively all encoders
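
For illustration only (not part of the patch): a minimal, hedged C sketch of the av_buffersrc_write_frame() entry point referenced in the lavfi 2.17.0 entry above. The headers and the assumption that the function copies the caller's frame follow current FFmpeg usage and may differ slightly in this snapshot; error handling is reduced to a log message.

    #include "libavfilter/avfilter.h"
    #include "libavfilter/buffersrc.h"

    /* Push one decoded audio frame into an already configured abuffer source.
     * av_buffersrc_write_frame() is assumed to copy the data, so the caller
     * keeps ownership of "frame". */
    static int feed_filtergraph(AVFilterContext *abuffer_ctx, AVFrame *frame)
    {
        int ret = av_buffersrc_write_frame(abuffer_ctx, frame);
        if (ret < 0)
            av_log(NULL, AV_LOG_ERROR, "Could not feed the filtergraph\n");
        return ret;
    }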
diff --cc doc/ffmpeg.texi
@@@ -781,32 -610,11 +786,33 @@@ Audio sync method. "Stretches/squeezes
  the parameter is the maximum samples per second by which the audio is changed.
  -async 1 is a special case where only the start of the audio stream is corrected
  without any later correction.
+ This option has been deprecated. Use the @code{asyncts} audio filter instead.
  @item -copyts
  Copy timestamps from input to output.
 -@item -copytb
 -Copy input stream time base from input to output when stream copying.
 +@item -copytb @var{mode}
 +Specify how to set the encoder timebase when stream copying.  @var{mode} is an
 +integer numeric value, and can assume one of the following values:
 +
 +@table @option
 +@item 1
 +Use the demuxer timebase.
 +
 +The time base is copied to the output encoder from the corresponding input
 +demuxer. This is sometimes required to avoid non monotonically increasing
 +timestamps when copying video streams with variable frame rate.
 +
 +@item 0
 +Use the decoder timebase.
 +
 +The time base is copied to the output encoder from the corresponding input
 +decoder.
 +
 +@item -1
 +Try to make the choice automatically, in order to generate a sane output.
 +@end table
 +
 +Default value is -1.
 +
  @item -shortest
  Finish encoding when the shortest input stream ends.
  @item -dts_delta_threshold
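
For illustration only (not part of the patch), the @option{-copytb} modes documented above could be exercised with a command along these lines; the file names and the choice of mode 1 (demuxer timebase) are arbitrary examples:

    ffmpeg -i input.ts -c:v copy -copytb 1 output.mkv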
diff --cc doc/filters.texi
@@@ -107,404 -107,60 +107,449 @@@ build
  
  Below is a description of the currently available audio filters.
  
 +@section aconvert
 +
 +Convert the input audio format to the specified formats.
 +
 +The filter accepts a string of the form:
 +"@var{sample_format}:@var{channel_layout}".
 +
 +@var{sample_format} specifies the sample format, and can be a string or the
 +corresponding numeric value defined in @file{libavutil/samplefmt.h}. Use 'p'
 +suffix for a planar sample format.
 +
 +@var{channel_layout} specifies the channel layout, and can be a string
 +or the corresponding numeric value defined in @file{libavutil/audioconvert.h}.
 +
 +The special parameter "auto" signifies that the filter will
 +automatically select the output format depending on the output filter.
 +
 +Some examples follow.
 +
 +@itemize
 +@item
 +Convert input to float, planar, stereo:
 +@example
 +aconvert=fltp:stereo
 +@end example
 +
 +@item
 +Convert input to unsigned 8-bit, automatically select the output channel layout:
 +@example
 +aconvert=u8:auto
 +@end example
 +@end itemize
 +
 +@section aformat
 +
 +Convert the input audio to one of the specified formats. The framework will
 +negotiate the most appropriate format to minimize conversions.
 +
 +The filter accepts three lists of formats, separated by ":", in the form:
 +"@var{sample_formats}:@var{channel_layouts}:@var{packing_formats}".
 +
 +Elements in each list are separated by "," which has to be escaped in the
 +filtergraph specification.
 +
 +The special parameter "all", in place of a list of elements, signifies all
 +supported formats.
 +
 +Some examples follow:
 +@example
 +aformat=u8\\,s16:mono:packed
 +
 +aformat=s16:mono\\,stereo:all
 +@end example
 +
 +@section amerge
 +
 +Merge two audio streams into a single multi-channel stream.
 +
 +This filter does not need any argument.
 +
 +If the channel layouts of the inputs are disjoint, and therefore compatible,
 +the channel layout of the output will be set accordingly and the channels
 +will be reordered as necessary. If the channel layouts of the inputs are not
 +disjoint, the output will have all the channels of the first input then all
 +the channels of the second input, in that order, and the channel layout of
 +the output will be the default value corresponding to the total number of
 +channels.
 +
 +For example, if the first input is in 2.1 (FL+FR+LF) and the second input
 +is FC+BL+BR, then the output will be in 5.1, with the channels in the
 +following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
 +first input, b1 is the first channel of the second input).
 +
 +On the other hand, if both inputs are in stereo, the output channels will be
 +in the default order: a1, a2, b1, b2, and the channel layout will be
 +arbitrarily set to 4.0, which may or may not be the expected value.
 +
 +Both inputs must have the same sample rate, format and packing.
 +
 +If inputs do not have the same duration, the output will stop with the
 +shortest.
 +
 +Example: merge two mono files into a stereo stream:
 +@example
 +amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
 +@end example
 +
 +If you need to do multiple merges (for instance multiple mono audio streams in
 +a single media file), you can do:
 +@example
 +ffmpeg -f lavfi -i "
 +amovie=input.mkv:si=0 [a0];
 +amovie=input.mkv:si=1 [a1];
 +amovie=input.mkv:si=2 [a2];
 +amovie=input.mkv:si=3 [a3];
 +amovie=input.mkv:si=4 [a4];
 +amovie=input.mkv:si=5 [a5];
 +[a0][a1] amerge [x0];
 +[x0][a2] amerge [x1];
 +[x1][a3] amerge [x2];
 +[x2][a4] amerge [x3];
 +[x3][a5] amerge" -c:a pcm_s16le output.mkv
 +@end example
 +
+ @section aformat
+ Convert the input audio to one of the specified formats. The framework will
+ negotiate the most appropriate format to minimize conversions.
+ The filter accepts the following named parameters:
+ @table @option
+ @item sample_fmts
+ A comma-separated list of requested sample formats.
+ @item sample_rates
+ A comma-separated list of requested sample rates.
+ @item channel_layouts
+ A comma-separated list of requested channel layouts.
+ @end table
+ If a parameter is omitted, all values are allowed.
+ For example, to force the output to either unsigned 8-bit or signed 16-bit stereo:
+ @example
+ aformat=sample_fmts\=u8\,s16:channel_layouts\=stereo
+ @end example
  @section anull
  
  Pass the audio source unchanged to the output.
  
 +@section aresample
 +
 +Resample the input audio to the specified sample rate.
 +
 +The filter accepts exactly one parameter, the output sample rate. If not
 +specified then the filter will automatically convert between its input
 +and output sample rates.
 +
 +For example, to resample the input audio to 44100Hz:
 +@example
 +aresample=44100
 +@end example
 +
 +@section ashowinfo
 +
 +Show a line containing various information for each input audio frame.
 +The input audio is not modified.
 +
 +The shown line contains a sequence of key/value pairs of the form
 +@var{key}:@var{value}.
 +
 +A description of each shown parameter follows:
 +
 +@table @option
 +@item n
 +sequential number of the input frame, starting from 0
 +
 +@item pts
 +presentation TimeStamp of the input frame, expressed as a number of
 +time base units. The time base unit depends on the filter input pad, and
 +is usually 1/@var{sample_rate}.
 +
 +@item pts_time
 +presentation TimeStamp of the input frame, expressed as a number of
 +seconds
 +
 +@item pos
 +position of the frame in the input stream, -1 if this information is
 +unavailable and/or meaningless (for example in case of synthetic audio)
 +
 +@item fmt
 +sample format name
 +
 +@item chlayout
 +channel layout description
 +
 +@item nb_samples
 +number of samples (per channel) contained in the filtered frame
 +
 +@item rate
 +sample rate for the audio frame
 +
 +@item planar
 +1 if the packing format is planar, 0 if packed
 +
 +@item checksum
 +Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame
 +
 +@item plane_checksum
 +Adler-32 checksum (printed in hexadecimal) for each input frame plane,
 +expressed in the form "[@var{c0} @var{c1} @var{c2} @var{c3} @var{c4} @var{c5}
 +@var{c6} @var{c7}]"
 +@end table
 +
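For illustration only (not part of the patch), the per-frame ashowinfo report described above can be printed without writing any output file, e.g. with a command such as:

    ffmpeg -i input.wav -af ashowinfo -f null -
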
 +@section asplit
 +
 +Pass on the input audio to two outputs. Both outputs are identical to
 +the input audio.
 +
 +For example:
 +@example
 +[in] asplit[out0], showaudio[out1]
 +@end example
 +
 +will create two separate outputs from the same input.
 +
 +@section astreamsync
 +
 +Forward two audio streams and control the order in which the buffers are forwarded.
 +
 +The argument to the filter is an expression deciding which stream should be
 +forwarded next: if the result is negative, the first stream is forwarded; if
 +the result is positive or zero, the second stream is forwarded. It can use
 +the following variables:
 +
 +@table @var
 +@item b1 b2
 +number of buffers forwarded so far on each stream
 +@item s1 s2
 +number of samples forwarded so far on each stream
 +@item t1 t2
 +current timestamp of each stream
 +@end table
 +
 +The default value is @code{t1-t2}, which means to always forward the stream
 +that has a smaller timestamp.
 +
 +Example: stress-test @code{amerge} by randomly sending buffers on the wrong
 +input, while avoiding too much desynchronization:
 +@example
 +amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
 +[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
 +[a2] [b2] amerge
 +@end example
 +
 +@section earwax
 +
 +Make audio easier to listen to on headphones.
 +
 +This filter adds `cues' to 44.1kHz stereo (i.e. audio CD format) audio
 +so that when listened to on headphones the stereo image is moved from
 +inside your head (standard for headphones) to outside and in front of
 +the listener (standard for speakers).
 +
 +Ported from SoX.
 +
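For illustration only (not part of the patch): since the filter expects 44.1kHz s16 stereo input, a possible invocation converts first and then applies earwax; the file names are placeholders:

    ffmpeg -i input.mp3 -af aconvert=s16:stereo,aresample=44100,earwax output.wav
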
 +@section pan
 +
 +Mix channels with specific gain levels. The filter accepts the output
 +channel layout followed by a set of channel definitions.
 +
 +This filter is also designed to efficiently remap the channels of an audio
 +stream.
 +
 +The filter accepts parameters of the form:
 +"@var{l}:@var{outdef}:@var{outdef}:..."
 +
 +@table @option
 +@item l
 +output channel layout or number of channels
 +
 +@item outdef
 +output channel specification, of the form:
 +"@var{out_name}=[@var{gain}*]@var{in_name}[+[@var{gain}*]@var{in_name}...]"
 +
 +@item out_name
 +output channel to define, either a channel name (FL, FR, etc.) or a channel
 +number (c0, c1, etc.)
 +
 +@item gain
 +multiplicative coefficient for the channel, 1 leaving the volume unchanged
 +
 +@item in_name
 +input channel to use, see out_name for details; it is not possible to mix
 +named and numbered input channels
 +@end table
 +
 +If the `=' in a channel specification is replaced by `<', then the gains for
 +that specification will be renormalized so that the total is 1, thus
 +avoiding clipping noise.
 +
 +@subsection Mixing examples
 +
 +For example, if you want to down-mix from stereo to mono, but with a bigger
 +factor for the left channel:
 +@example
 +pan=1:c0=0.9*c0+0.1*c1
 +@end example
 +
 +A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
 +7-channel surround:
 +@example
 +pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
 +@end example
 +
 +Note that @command{ffmpeg} integrates a default down-mix (and up-mix) system
 +that should be preferred (see "-ac" option) unless you have very specific
 +needs.
 +
 +@subsection Remapping examples
 +
 +The channel remapping will be effective if, and only if:
 +
 +@itemize
 +@item gain coefficients are zeroes or ones,
 +@item only one input per output channel,
 +@end itemize
 +
 +If all these conditions are satisfied, the filter will notify the user ("Pure
 +channel mapping detected"), and use an optimized and lossless method to do the
 +remapping.
 +
 +For example, if you have a 5.1 source and want a stereo audio stream by
 +dropping the extra channels:
 +@example
 +pan="stereo: c0=FL : c1=FR"
 +@end example
 +
 +Given the same source, you can also switch front left and front right channels
 +and keep the input channel layout:
 +@example
 +pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5"
 +@end example
 +
 +If the input is a stereo audio stream, you can mute the front left channel (and
 +still keep the stereo channel layout) with:
 +@example
 +pan="stereo:c1=c1"
 +@end example
 +
 +Still with a stereo audio stream input, you can copy the right channel into
 +both the front left and front right channels:
 +@example
 +pan="stereo: c0=FR : c1=FR"
 +@end example
 +
 +@section silencedetect
 +
 +Detect silence in an audio stream.
 +
 +This filter logs a message when it detects that the input audio volume is less
 +than or equal to a noise tolerance value for a duration greater than or equal
 +to the minimum detected noise duration.
 +
 +The printed times and duration are expressed in seconds.
 +
 +@table @option
 +@item duration, d
 +Set silence duration until notification (default is 2 seconds).
 +
 +@item noise, n
 +Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
 +specified value) or amplitude ratio. Default is -60dB, or 0.001.
 +@end table
 +
 +Detect 5 seconds of silence with -50dB noise tolerance:
 +@example
 +silencedetect=n=-50dB:d=5
 +@end example
 +
 +Complete example with @command{ffmpeg} to detect silence with 0.0001 noise
 +tolerance in @file{silence.mp3}:
 +@example
 +ffmpeg -f lavfi -i amovie=silence.mp3,silencedetect=noise=0.0001 -f null -
 +@end example
 +
 +@section volume
 +
 +Adjust the input audio volume.
 +
 +The filter accepts exactly one parameter @var{vol}, which expresses
 +how the audio volume will be increased or decreased.
 +
 +Output values are clipped to the maximum value.
 +
 +If @var{vol} is expressed as a decimal number, the output audio
 +volume is given by the relation:
 +@example
 +@var{output_volume} = @var{vol} * @var{input_volume}
 +@end example
 +
 +If @var{vol} is expressed as a decimal number followed by the string
 +"dB", the value represents the requested change in decibels of the
 +input audio power, and the output audio volume is given by the
 +relation:
 +@example
 +@var{output_volume} = 10^(@var{vol}/20) * @var{input_volume}
 +@end example
 +
 +Otherwise @var{vol} is considered an expression and its evaluated
 +value is used for computing the output audio volume according to the
 +first relation.
 +
 +Default value for @var{vol} is 1.0.
 +
 +@subsection Examples
 +
 +@itemize
 +@item
 +Halve the input audio volume:
 +@example
 +volume=0.5
 +@end example
 +
 +The above example is equivalent to:
 +@example
 +volume=1/2
 +@end example
 +
 +@item
 +Decrease input audio power by 12 decibels:
 +@example
 +volume=-12dB
 +@end example
 +@end itemize
 +
+ @section asyncts
+ Synchronize audio data with timestamps by squeezing/stretching it and/or
+ dropping samples/adding silence when needed.
+ The filter accepts the following named parameters:
+ @table @option
+ @item compensate
+ Enable stretching/squeezing the data to make it match the timestamps.
+ @item min_delta
+ Minimum difference between timestamps and audio data (in seconds) to trigger
+ adding/dropping samples.
+ @item max_comp
+ Maximum compensation in samples per second.
+ @end table
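For illustration only (not part of the patch), the named parameters listed above might be used as follows; the values are arbitrary examples, not recommended defaults:

    ffmpeg -i input.mkv -af asyncts=compensate=1:min_delta=0.1 output.mkv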
  @section resample
  Convert the audio sample format, sample rate and channel layout. This filter is
 -not meant to be used directly, it is inserted automatically by libavfilter
 -whenever conversion is needed. Use the @var{aformat} filter to force a specific
 -conversion.
 +not meant to be used directly.
  
  @c man end AUDIO FILTERS
  
@@@ -707,20 -187,42 +752,47 @@@ Check the channel_layout_map definitio
  @file{libavcodec/audioconvert.c} for the mapping between strings and
  channel layout values.
  
 +@item nb_samples, n
 +Set the number of samples per requested frame.
 +
 +@end table
 +
  Some examples follow:
  @example
 -#  set the sample rate to 48000 Hz and the channel layout to CH_LAYOUT_MONO.
 -anullsrc=48000:4
 +#  set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
 +anullsrc=r=48000:cl=4
  
  # same as
 -anullsrc=48000:mono
 +anullsrc=r=48000:cl=mono
  @end example
  
+ @section abuffer
+ Buffer audio frames, and make them available to the filter chain.
+ This source is not intended to be part of user-supplied graph descriptions but
+ for insertion by calling programs through the interface defined in
+ @file{libavfilter/buffersrc.h}.
+ It accepts the following named parameters:
+ @table @option
+ @item time_base
+ Timebase which will be used for timestamps of submitted frames. It must be
+ either a floating-point number or in @var{numerator}/@var{denominator} form.
+ @item sample_rate
+ Audio sample rate.
+ @item sample_fmt
+ Name of the sample format, as returned by @code{av_get_sample_fmt_name()}.
+ @item channel_layout
+ Channel layout of the audio data, in the form that can be accepted by
+ @code{av_get_channel_layout()}.
+ @end table
+ All the parameters need to be explicitly defined.
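For illustration only (not part of the patch): a hedged C sketch of inserting an abuffer source programmatically, as the text above intends, using avfilter_graph_create_filter() with an option string built from the named parameters. The exact option-string syntax and the parameter values are assumptions; error handling and the rest of the graph are omitted.

    #include "libavfilter/avfilter.h"

    /* Create an abuffer source describing 44.1kHz s16 stereo input. */
    static AVFilterContext *make_abuffer_src(AVFilterGraph *graph)
    {
        AVFilterContext *src = NULL;
        const char *args =
            "time_base=1/44100:sample_rate=44100:"
            "sample_fmt=s16:channel_layout=stereo";

        if (avfilter_graph_create_filter(&src, avfilter_get_by_name("abuffer"),
                                         "src", args, NULL, graph) < 0)
            return NULL;
        return src;
    }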
  @c man end AUDIO SOURCES
  
  @chapter Audio Sinks
@@@ -745,109 -236,24 +817,116 @@@ Null audio sink, do absolutely nothing 
  mainly useful as a template and to be employed in analysis / debugging
  tools.
  
 -@section abuffersink
 -This sink is intended for programmatic use. Frames that arrive on this sink can
 -be retrieved by the calling program using the interface defined in
 -@file{libavfilter/buffersink.h}.
++@section abuffersink
++This sink is intended for programmatic use. Frames that arrive on this sink can
++be retrieved by the calling program using the interface defined in
++@file{libavfilter/buffersink.h}.
++
++This filter accepts no parameters.
++
 +@c man end AUDIO SINKS
 +
 +@chapter Video Filters
 +@c man begin VIDEO FILTERS
 +
 +When you configure your FFmpeg build, you can disable any of the
 +existing filters using @code{--disable-filters}.
 +The configure output will show the video filters included in your
 +build.
 +
 +Below is a description of the currently available video filters.
 +
 +@section ass
 +
 +Draw ASS (Advanced Substation Alpha) subtitles on top of input video
 +using the libass library.
 +
 +To enable compilation of this filter you need to configure FFmpeg with
 +@code{--enable-libass}.
 +
 +This filter accepts the syntax: @var{ass_filename}[:@var{options}],
 +where @var{ass_filename} is the filename of the ASS file to read, and
 +@var{options} is an optional sequence of @var{key}=@var{value} pairs,
 +separated by ":".
 +
 +A description of the accepted options follows.
 +
 +@table @option
 +@item original_size
 +Specifies the size of the original video, the video for which the ASS file
 +was composed. Due to a misdesign in ASS aspect ratio arithmetic, this is
 +necessary to correctly scale the fonts if the aspect ratio has been changed.
 +@end table
 +
 +For example, to render the file @file{sub.ass} on top of the input
 +video, use the command:
 +@example
 +ass=sub.ass
 +@end example
 +
 +@section bbox
 +
 +Compute the bounding box for the non-black pixels in the input frame
 +luminance plane.
 +
 +This filter computes the bounding box containing all the pixels with a
 +luminance value greater than the minimum allowed value.
 +The parameters describing the bounding box are printed on the filter
 +log.
 +
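For illustration only (not part of the patch): because the bounding box is only reported on the filter log, a typical probing run discards the output, e.g.:

    ffmpeg -i input.mp4 -vf bbox -an -f null -
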
 +@section blackdetect
 +
 +Detect video intervals that are (almost) completely black. Can be
 +useful to detect chapter transitions, commercials, or invalid
 +recordings. Output lines contain the start, end and duration of the
 +detected black interval, expressed in seconds.
 +
 +In order to display the output lines, you need to set the loglevel at
 +least to the AV_LOG_INFO value.
 +
 +This filter accepts a list of options in the form of
 +@var{key}=@var{value} pairs separated by ":". A description of the
 +accepted options follows.
 +
 +@table @option
 +@item black_min_duration, d
 +Set the minimum detected black duration expressed in seconds. It must
 +be a non-negative floating point number.
 +
 +Default value is 2.0.
 +
 +@item picture_black_ratio_th, pic_th
 +Set the threshold for considering a picture "black".
 +Express the minimum value for the ratio:
 +@example
 +@var{nb_black_pixels} / @var{nb_pixels}
 +@end example
 +
 +for which a picture is considered black.
 +Default value is 0.98.
  
 -This filter accepts no parameters.
 +@item pixel_black_th, pix_th
 +Set the threshold for considering a pixel "black".
  
 -@c man end AUDIO SINKS
 +The threshold expresses the maximum pixel luminance value for which a
 +pixel is considered "black". The provided value is scaled according to
 +the following equation:
 +@example
 +@var{absolute_threshold} = @var{luminance_minimum_value} + @var{pixel_black_th} * @var{luminance_range_size}
 +@end example
  
 -@chapter Video Filters
 -@c man begin VIDEO FILTERS
 +@var{luminance_range_size} and @var{luminance_minimum_value} depend on
 +the input video format, the range is [0-255] for YUV full-range
 +formats and [16-235] for YUV non full-range formats.
  
 -When you configure your Libav build, you can disable any of the
 -existing filters using --disable-filters.
 -The configure output will show the video filters included in your
 -build.
 +Default value is 0.10.
 +@end table
  
 -Below is a description of the currently available video filters.
 +The following example sets the maximum pixel threshold to the minimum
 +value, and detects only black intervals of 2 or more seconds:
 +@example
 +blackdetect=d=2:pix_th=0.00
 +@end example
  
  @section blackframe
  
diff --cc ffprobe.c
index b1b909d,0000000..5009d58
mode 100644,000000..100644
--- /dev/null
+++ b/ffprobe.c
@@@ -1,1809 -1,0 +1,1823 @@@
-         for (i = 0; i < fmt_ctx->nb_streams; i++)
-             if (fmt_ctx->streams[i]->codec->codec_id != CODEC_ID_NONE)
-                 avcodec_close(fmt_ctx->streams[i]->codec);
-         avformat_close_input(&fmt_ctx);
 +/*
 + * Copyright (c) 2007-2010 Stefano Sabatini
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * simple media prober based on the FFmpeg libraries
 + */
 +
 +#include "config.h"
 +#include "version.h"
 +
 +#include "libavformat/avformat.h"
 +#include "libavcodec/avcodec.h"
 +#include "libavutil/avstring.h"
 +#include "libavutil/bprint.h"
 +#include "libavutil/opt.h"
 +#include "libavutil/pixdesc.h"
 +#include "libavutil/dict.h"
 +#include "libavutil/timecode.h"
 +#include "libavdevice/avdevice.h"
 +#include "libswscale/swscale.h"
 +#include "libswresample/swresample.h"
 +#include "libpostproc/postprocess.h"
 +#include "cmdutils.h"
 +
 +const char program_name[] = "ffprobe";
 +const int program_birth_year = 2007;
 +
 +static int do_count_frames = 0;
 +static int do_count_packets = 0;
 +static int do_read_frames  = 0;
 +static int do_read_packets = 0;
 +static int do_show_error   = 0;
 +static int do_show_format  = 0;
 +static int do_show_frames  = 0;
 +static AVDictionary *fmt_entries_to_show = NULL;
 +static int do_show_packets = 0;
 +static int do_show_streams = 0;
 +static int do_show_program_version  = 0;
 +static int do_show_library_versions = 0;
 +
 +static int show_value_unit              = 0;
 +static int use_value_prefix             = 0;
 +static int use_byte_value_binary_prefix = 0;
 +static int use_value_sexagesimal_format = 0;
 +static int show_private_data            = 1;
 +
 +static char *print_format;
 +
 +static const OptionDef options[];
 +
 +/* FFprobe context */
 +static const char *input_filename;
 +static AVInputFormat *iformat = NULL;
 +
 +static const char *const binary_unit_prefixes [] = { "", "Ki", "Mi", "Gi", "Ti", "Pi" };
 +static const char *const decimal_unit_prefixes[] = { "", "K" , "M" , "G" , "T" , "P"  };
 +
 +static const char unit_second_str[]         = "s"    ;
 +static const char unit_hertz_str[]          = "Hz"   ;
 +static const char unit_byte_str[]           = "byte" ;
 +static const char unit_bit_per_second_str[] = "bit/s";
 +static uint64_t *nb_streams_packets;
 +static uint64_t *nb_streams_frames;
 +
 +void av_noreturn exit_program(int ret)
 +{
 +    av_dict_free(&fmt_entries_to_show);
 +    exit(ret);
 +}
 +
 +struct unit_value {
 +    union { double d; long long int i; } val;
 +    const char *unit;
 +};
 +
 +static char *value_string(char *buf, int buf_size, struct unit_value uv)
 +{
 +    double vald;
 +    int show_float = 0;
 +
 +    if (uv.unit == unit_second_str) {
 +        vald = uv.val.d;
 +        show_float = 1;
 +    } else {
 +        vald = uv.val.i;
 +    }
 +
 +    if (uv.unit == unit_second_str && use_value_sexagesimal_format) {
 +        double secs;
 +        int hours, mins;
 +        secs  = vald;
 +        mins  = (int)secs / 60;
 +        secs  = secs - mins * 60;
 +        hours = mins / 60;
 +        mins %= 60;
 +        snprintf(buf, buf_size, "%d:%02d:%09.6f", hours, mins, secs);
 +    } else {
 +        const char *prefix_string = "";
 +        int l;
 +
 +        if (use_value_prefix && vald > 1) {
 +            long long int index;
 +
 +            if (uv.unit == unit_byte_str && use_byte_value_binary_prefix) {
 +                index = (long long int) (log(vald)/log(2)) / 10;
 +                index = av_clip(index, 0, FF_ARRAY_ELEMS(binary_unit_prefixes) - 1);
 +                vald /= pow(2, index * 10);
 +                prefix_string = binary_unit_prefixes[index];
 +            } else {
 +                index = (long long int) (log10(vald)) / 3;
 +                index = av_clip(index, 0, FF_ARRAY_ELEMS(decimal_unit_prefixes) - 1);
 +                vald /= pow(10, index * 3);
 +                prefix_string = decimal_unit_prefixes[index];
 +            }
 +        }
 +
 +        if (show_float || (use_value_prefix && vald != (long long int)vald))
 +            l = snprintf(buf, buf_size, "%f", vald);
 +        else
 +            l = snprintf(buf, buf_size, "%lld", (long long int)vald);
 +        snprintf(buf+l, buf_size-l, "%s%s%s", *prefix_string || show_value_unit ? " " : "",
 +                 prefix_string, show_value_unit ? uv.unit : "");
 +    }
 +
 +    return buf;
 +}
 +
 +/* WRITERS API */
 +
 +typedef struct WriterContext WriterContext;
 +
 +#define WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS 1
 +#define WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER 2
 +
 +typedef struct Writer {
 +    int priv_size;                  ///< private size for the writer context
 +    const char *name;
 +
 +    int  (*init)  (WriterContext *wctx, const char *args, void *opaque);
 +    void (*uninit)(WriterContext *wctx);
 +
 +    void (*print_header)(WriterContext *ctx);
 +    void (*print_footer)(WriterContext *ctx);
 +
 +    void (*print_chapter_header)(WriterContext *wctx, const char *);
 +    void (*print_chapter_footer)(WriterContext *wctx, const char *);
 +    void (*print_section_header)(WriterContext *wctx, const char *);
 +    void (*print_section_footer)(WriterContext *wctx, const char *);
 +    void (*print_integer)       (WriterContext *wctx, const char *, long long int);
 +    void (*print_string)        (WriterContext *wctx, const char *, const char *);
 +    void (*show_tags)           (WriterContext *wctx, AVDictionary *dict);
 +    int flags;                  ///< a combination of WRITER_FLAG_*
 +} Writer;
 +
 +struct WriterContext {
 +    const AVClass *class;           ///< class of the writer
 +    const Writer *writer;           ///< the Writer of which this is an instance
 +    char *name;                     ///< name of this writer instance
 +    void *priv;                     ///< private data for use by the writer
 +    unsigned int nb_item;           ///< number of the item printed in the given section, starting at 0
 +    unsigned int nb_section;        ///< number of the section printed in the given section sequence, starting at 0
 +    unsigned int nb_chapter;        ///< number of the chapter, starting at 0
 +
 +    int is_fmt_chapter;             ///< tells if the current chapter is "format", required by the print_format_entry option
 +};
 +
 +static const char *writer_get_name(void *p)
 +{
 +    WriterContext *wctx = p;
 +    return wctx->writer->name;
 +}
 +
 +static const AVClass writer_class = {
 +    "Writer",
 +    writer_get_name,
 +    NULL,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +static void writer_close(WriterContext **wctx)
 +{
 +    if (!*wctx)
 +        return;
 +
 +    if ((*wctx)->writer->uninit)
 +        (*wctx)->writer->uninit(*wctx);
 +    av_freep(&((*wctx)->priv));
 +    av_freep(wctx);
 +}
 +
 +static int writer_open(WriterContext **wctx, const Writer *writer,
 +                       const char *args, void *opaque)
 +{
 +    int ret = 0;
 +
 +    if (!(*wctx = av_malloc(sizeof(WriterContext)))) {
 +        ret = AVERROR(ENOMEM);
 +        goto fail;
 +    }
 +
 +    if (!((*wctx)->priv = av_mallocz(writer->priv_size))) {
 +        ret = AVERROR(ENOMEM);
 +        goto fail;
 +    }
 +
 +    (*wctx)->class = &writer_class;
 +    (*wctx)->writer = writer;
 +    if ((*wctx)->writer->init)
 +        ret = (*wctx)->writer->init(*wctx, args, opaque);
 +    if (ret < 0)
 +        goto fail;
 +
 +    return 0;
 +
 +fail:
 +    writer_close(wctx);
 +    return ret;
 +}
 +
 +static inline void writer_print_header(WriterContext *wctx)
 +{
 +    if (wctx->writer->print_header)
 +        wctx->writer->print_header(wctx);
 +    wctx->nb_chapter = 0;
 +}
 +
 +static inline void writer_print_footer(WriterContext *wctx)
 +{
 +    if (wctx->writer->print_footer)
 +        wctx->writer->print_footer(wctx);
 +}
 +
 +static inline void writer_print_chapter_header(WriterContext *wctx,
 +                                               const char *chapter)
 +{
 +    if (wctx->writer->print_chapter_header)
 +        wctx->writer->print_chapter_header(wctx, chapter);
 +    wctx->nb_section = 0;
 +
 +    wctx->is_fmt_chapter = !strcmp(chapter, "format");
 +}
 +
 +static inline void writer_print_chapter_footer(WriterContext *wctx,
 +                                               const char *chapter)
 +{
 +    if (wctx->writer->print_chapter_footer)
 +        wctx->writer->print_chapter_footer(wctx, chapter);
 +    wctx->nb_chapter++;
 +}
 +
 +static inline void writer_print_section_header(WriterContext *wctx,
 +                                               const char *section)
 +{
 +    if (wctx->writer->print_section_header)
 +        wctx->writer->print_section_header(wctx, section);
 +    wctx->nb_item = 0;
 +}
 +
 +static inline void writer_print_section_footer(WriterContext *wctx,
 +                                               const char *section)
 +{
 +    if (wctx->writer->print_section_footer)
 +        wctx->writer->print_section_footer(wctx, section);
 +    wctx->nb_section++;
 +}
 +
 +static inline void writer_print_integer(WriterContext *wctx,
 +                                        const char *key, long long int val)
 +{
 +    if (!wctx->is_fmt_chapter || !fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
 +        wctx->writer->print_integer(wctx, key, val);
 +        wctx->nb_item++;
 +    }
 +}
 +
 +static inline void writer_print_string(WriterContext *wctx,
 +                                       const char *key, const char *val, int opt)
 +{
 +    if (opt && !(wctx->writer->flags & WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS))
 +        return;
 +    if (!wctx->is_fmt_chapter || !fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
 +        wctx->writer->print_string(wctx, key, val);
 +        wctx->nb_item++;
 +    }
 +}
 +
 +static void writer_print_time(WriterContext *wctx, const char *key,
 +                              int64_t ts, const AVRational *time_base)
 +{
 +    char buf[128];
 +
 +    if (!wctx->is_fmt_chapter || !fmt_entries_to_show || av_dict_get(fmt_entries_to_show, key, NULL, 0)) {
 +        if (ts == AV_NOPTS_VALUE) {
 +            writer_print_string(wctx, key, "N/A", 1);
 +        } else {
 +            double d = ts * av_q2d(*time_base);
 +            value_string(buf, sizeof(buf), (struct unit_value){.val.d=d, .unit=unit_second_str});
 +            writer_print_string(wctx, key, buf, 0);
 +        }
 +    }
 +}
 +
 +static void writer_print_ts(WriterContext *wctx, const char *key, int64_t ts)
 +{
 +    if (ts == AV_NOPTS_VALUE) {
 +        writer_print_string(wctx, key, "N/A", 1);
 +    } else {
 +        writer_print_integer(wctx, key, ts);
 +    }
 +}
 +
 +static inline void writer_show_tags(WriterContext *wctx, AVDictionary *dict)
 +{
 +    wctx->writer->show_tags(wctx, dict);
 +}
 +
 +#define MAX_REGISTERED_WRITERS_NB 64
 +
 +static const Writer *registered_writers[MAX_REGISTERED_WRITERS_NB + 1];
 +
 +static int writer_register(const Writer *writer)
 +{
 +    static int next_registered_writer_idx = 0;
 +
 +    if (next_registered_writer_idx == MAX_REGISTERED_WRITERS_NB)
 +        return AVERROR(ENOMEM);
 +
 +    registered_writers[next_registered_writer_idx++] = writer;
 +    return 0;
 +}
 +
 +static const Writer *writer_get_by_name(const char *name)
 +{
 +    int i;
 +
 +    for (i = 0; registered_writers[i]; i++)
 +        if (!strcmp(registered_writers[i]->name, name))
 +            return registered_writers[i];
 +
 +    return NULL;
 +}
 +
 +
 +/* WRITERS */
 +
 +/* Default output */
 +
 +typedef struct DefaultContext {
 +    const AVClass *class;
 +    int nokey;
 +    int noprint_wrappers;
 +} DefaultContext;
 +
 +#define OFFSET(x) offsetof(DefaultContext, x)
 +
 +static const AVOption default_options[] = {
 +    { "noprint_wrappers", "do not print headers and footers", OFFSET(noprint_wrappers), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
 +    { "nw",               "do not print headers and footers", OFFSET(noprint_wrappers), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
 +    { "nokey",          "force no key printing",     OFFSET(nokey),          AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
 +    { "nk",             "force no key printing",     OFFSET(nokey),          AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
 +    {NULL},
 +};
 +
 +static const char *default_get_name(void *ctx)
 +{
 +    return "default";
 +}
 +
 +static const AVClass default_class = {
 +    "DefaultContext",
 +    default_get_name,
 +    default_options
 +};
 +
 +static av_cold int default_init(WriterContext *wctx, const char *args, void *opaque)
 +{
 +    DefaultContext *def = wctx->priv;
 +    int err;
 +
 +    def->class = &default_class;
 +    av_opt_set_defaults(def);
 +
 +    if (args &&
 +        (err = (av_set_options_string(def, args, "=", ":"))) < 0) {
 +        av_log(wctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
 +        return err;
 +    }
 +
 +    return 0;
 +}
 +
 +static void default_print_footer(WriterContext *wctx)
 +{
 +    DefaultContext *def = wctx->priv;
 +
 +    if (!def->noprint_wrappers)
 +        printf("\n");
 +}
 +
 +static void default_print_chapter_header(WriterContext *wctx, const char *chapter)
 +{
 +    DefaultContext *def = wctx->priv;
 +
 +    if (!def->noprint_wrappers && wctx->nb_chapter)
 +        printf("\n");
 +}
 +
 +/* lame uppercasing routine, assumes the string is lower case ASCII */
 +static inline char *upcase_string(char *dst, size_t dst_size, const char *src)
 +{
 +    int i;
 +    for (i = 0; src[i] && i < dst_size-1; i++)
 +        dst[i] = av_toupper(src[i]);
 +    dst[i] = 0;
 +    return dst;
 +}
 +
 +static void default_print_section_header(WriterContext *wctx, const char *section)
 +{
 +    DefaultContext *def = wctx->priv;
 +    char buf[32];
 +
 +    if (wctx->nb_section)
 +        printf("\n");
 +    if (!def->noprint_wrappers)
 +        printf("[%s]\n", upcase_string(buf, sizeof(buf), section));
 +}
 +
 +static void default_print_section_footer(WriterContext *wctx, const char *section)
 +{
 +    DefaultContext *def = wctx->priv;
 +    char buf[32];
 +
 +    if (!def->noprint_wrappers)
 +        printf("[/%s]", upcase_string(buf, sizeof(buf), section));
 +}
 +
 +static void default_print_str(WriterContext *wctx, const char *key, const char *value)
 +{
 +    DefaultContext *def = wctx->priv;
 +    if (!def->nokey)
 +        printf("%s=", key);
 +    printf("%s\n", value);
 +}
 +
 +static void default_print_int(WriterContext *wctx, const char *key, long long int value)
 +{
 +    DefaultContext *def = wctx->priv;
 +
 +    if (!def->nokey)
 +        printf("%s=", key);
 +    printf("%lld\n", value);
 +}
 +
 +static void default_show_tags(WriterContext *wctx, AVDictionary *dict)
 +{
 +    AVDictionaryEntry *tag = NULL;
 +    while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
 +        if (!fmt_entries_to_show || (tag->key && av_dict_get(fmt_entries_to_show, tag->key, NULL, 0)))
 +            printf("TAG:");
 +        writer_print_string(wctx, tag->key, tag->value, 0);
 +    }
 +}
 +
 +static const Writer default_writer = {
 +    .name                  = "default",
 +    .priv_size             = sizeof(DefaultContext),
 +    .init                  = default_init,
 +    .print_footer          = default_print_footer,
 +    .print_chapter_header  = default_print_chapter_header,
 +    .print_section_header  = default_print_section_header,
 +    .print_section_footer  = default_print_section_footer,
 +    .print_integer         = default_print_int,
 +    .print_string          = default_print_str,
 +    .show_tags             = default_show_tags,
 +    .flags = WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS,
 +};
 +
 +/* Compact output */
 +
 +/**
 + * Escape \n, \r, \\ and sep characters contained in s, and print the
 + * resulting string.
 + */
 +static const char *c_escape_str(AVBPrint *dst, const char *src, const char sep, void *log_ctx)
 +{
 +    const char *p;
 +
 +    for (p = src; *p; p++) {
 +        switch (*p) {
 +        case '\n': av_bprintf(dst, "%s", "\\n");  break;
 +        case '\r': av_bprintf(dst, "%s", "\\r");  break;
 +        case '\\': av_bprintf(dst, "%s", "\\\\"); break;
 +        default:
 +            if (*p == sep)
 +                av_bprint_chars(dst, '\\', 1);
 +            av_bprint_chars(dst, *p, 1);
 +        }
 +    }
 +    return dst->str;
 +}
 +
 +/**
 + * Quote fields containing special characters, check RFC4180.
 + */
 +static const char *csv_escape_str(AVBPrint *dst, const char *src, const char sep, void *log_ctx)
 +{
 +    const char *p;
 +    int quote = 0;
 +
 +    /* check if input needs quoting */
 +    for (p = src; *p; p++)
 +        if (*p == '"' || *p == sep || *p == '\n' || *p == '\r')
 +            quote = 1;
 +
 +    if (quote)
 +        av_bprint_chars(dst, '\"', 1);
 +
 +    for (p = src; *p; p++) {
 +        if (*p == '"')
 +            av_bprint_chars(dst, '\"', 1);
 +        av_bprint_chars(dst, *p, 1);
 +    }
 +    if (quote)
 +        av_bprint_chars(dst, '\"', 1);
 +    return dst->str;
 +}
 +
 +static const char *none_escape_str(AVBPrint *dst, const char *src, const char sep, void *log_ctx)
 +{
 +    return src;
 +}
 +
 +typedef struct CompactContext {
 +    const AVClass *class;
 +    char *item_sep_str;
 +    char item_sep;
 +    int nokey;
 +    char *escape_mode_str;
 +    const char * (*escape_str)(AVBPrint *dst, const char *src, const char sep, void *log_ctx);
 +} CompactContext;
 +
 +#undef OFFSET
 +#define OFFSET(x) offsetof(CompactContext, x)
 +
 +static const AVOption compact_options[]= {
 +    {"item_sep", "set item separator",    OFFSET(item_sep_str),    AV_OPT_TYPE_STRING, {.str="|"},  CHAR_MIN, CHAR_MAX },
 +    {"s",        "set item separator",    OFFSET(item_sep_str),    AV_OPT_TYPE_STRING, {.str="|"},  CHAR_MIN, CHAR_MAX },
 +    {"nokey",    "force no key printing", OFFSET(nokey),           AV_OPT_TYPE_INT,    {.dbl=0},    0,        1        },
 +    {"nk",       "force no key printing", OFFSET(nokey),           AV_OPT_TYPE_INT,    {.dbl=0},    0,        1        },
 +    {"escape",   "set escape mode",       OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"},  CHAR_MIN, CHAR_MAX },
 +    {"e",        "set escape mode",       OFFSET(escape_mode_str), AV_OPT_TYPE_STRING, {.str="c"},  CHAR_MIN, CHAR_MAX },
 +    {NULL},
 +};
 +
 +static const char *compact_get_name(void *ctx)
 +{
 +    return "compact";
 +}
 +
 +static const AVClass compact_class = {
 +    "CompactContext",
 +    compact_get_name,
 +    compact_options
 +};
 +
 +static av_cold int compact_init(WriterContext *wctx, const char *args, void *opaque)
 +{
 +    CompactContext *compact = wctx->priv;
 +    int err;
 +
 +    compact->class = &compact_class;
 +    av_opt_set_defaults(compact);
 +
 +    if (args &&
 +        (err = (av_set_options_string(compact, args, "=", ":"))) < 0) {
 +        av_log(wctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
 +        return err;
 +    }
 +    if (strlen(compact->item_sep_str) != 1) {
 +        av_log(wctx, AV_LOG_ERROR, "Item separator '%s' specified, but must contain a single character\n",
 +               compact->item_sep_str);
 +        return AVERROR(EINVAL);
 +    }
 +    compact->item_sep = compact->item_sep_str[0];
 +
 +    if      (!strcmp(compact->escape_mode_str, "none")) compact->escape_str = none_escape_str;
 +    else if (!strcmp(compact->escape_mode_str, "c"   )) compact->escape_str = c_escape_str;
 +    else if (!strcmp(compact->escape_mode_str, "csv" )) compact->escape_str = csv_escape_str;
 +    else {
 +        av_log(wctx, AV_LOG_ERROR, "Unknown escape mode '%s'\n", compact->escape_mode_str);
 +        return AVERROR(EINVAL);
 +    }
 +
 +    return 0;
 +}
 +
 +static av_cold void compact_uninit(WriterContext *wctx)
 +{
 +    CompactContext *compact = wctx->priv;
 +
 +    av_freep(&compact->item_sep_str);
 +    av_freep(&compact->escape_mode_str);
 +}
 +
 +static void compact_print_section_header(WriterContext *wctx, const char *section)
 +{
 +    CompactContext *compact = wctx->priv;
 +
 +    printf("%s%c", section, compact->item_sep);
 +}
 +
 +static void compact_print_section_footer(WriterContext *wctx, const char *section)
 +{
 +    printf("\n");
 +}
 +
 +static void compact_print_str(WriterContext *wctx, const char *key, const char *value)
 +{
 +    CompactContext *compact = wctx->priv;
 +    AVBPrint buf;
 +
 +    if (wctx->nb_item) printf("%c", compact->item_sep);
 +    if (!compact->nokey)
 +        printf("%s=", key);
 +    av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +    printf("%s", compact->escape_str(&buf, value, compact->item_sep, wctx));
 +    av_bprint_finalize(&buf, NULL);
 +}
 +
 +static void compact_print_int(WriterContext *wctx, const char *key, long long int value)
 +{
 +    CompactContext *compact = wctx->priv;
 +
 +    if (wctx->nb_item) printf("%c", compact->item_sep);
 +    if (!compact->nokey)
 +        printf("%s=", key);
 +    printf("%lld", value);
 +}
 +
 +static void compact_show_tags(WriterContext *wctx, AVDictionary *dict)
 +{
 +    CompactContext *compact = wctx->priv;
 +    AVDictionaryEntry *tag = NULL;
 +    AVBPrint buf;
 +
 +    while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
 +        if (wctx->nb_item) printf("%c", compact->item_sep);
 +
 +        if (!compact->nokey) {
 +            av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +            printf("tag:%s=", compact->escape_str(&buf, tag->key, compact->item_sep, wctx));
 +            av_bprint_finalize(&buf, NULL);
 +        }
 +
 +        av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +        printf("%s", compact->escape_str(&buf, tag->value, compact->item_sep, wctx));
 +        av_bprint_finalize(&buf, NULL);
 +    }
 +}
 +
 +static const Writer compact_writer = {
 +    .name                 = "compact",
 +    .priv_size            = sizeof(CompactContext),
 +    .init                 = compact_init,
 +    .uninit               = compact_uninit,
 +    .print_section_header = compact_print_section_header,
 +    .print_section_footer = compact_print_section_footer,
 +    .print_integer        = compact_print_int,
 +    .print_string         = compact_print_str,
 +    .show_tags            = compact_show_tags,
 +    .flags = WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS,
 +};
 +
 +/* CSV output */
 +
 +static av_cold int csv_init(WriterContext *wctx, const char *args, void *opaque)
 +{
 +    return compact_init(wctx, "item_sep=,:nokey=1:escape=csv", opaque);
 +}
 +
 +static const Writer csv_writer = {
 +    .name                 = "csv",
 +    .priv_size            = sizeof(CompactContext),
 +    .init                 = csv_init,
 +    .uninit               = compact_uninit,
 +    .print_section_header = compact_print_section_header,
 +    .print_section_footer = compact_print_section_footer,
 +    .print_integer        = compact_print_int,
 +    .print_string         = compact_print_str,
 +    .show_tags            = compact_show_tags,
 +    .flags = WRITER_FLAG_DISPLAY_OPTIONAL_FIELDS,
 +};
 +
 +/* JSON output */
 +
 +typedef struct {
 +    const AVClass *class;
 +    int multiple_entries; ///< tells if the given chapter requires multiple entries
 +    int print_packets_and_frames;
 +    int indent_level;
 +    int compact;
 +    const char *item_sep, *item_start_end;
 +} JSONContext;
 +
 +#undef OFFSET
 +#define OFFSET(x) offsetof(JSONContext, x)
 +
 +static const AVOption json_options[]= {
 +    { "compact", "enable compact output", OFFSET(compact), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
 +    { "c",       "enable compact output", OFFSET(compact), AV_OPT_TYPE_INT, {.dbl=0}, 0, 1 },
 +    { NULL }
 +};
 +
 +static const char *json_get_name(void *ctx)
 +{
 +    return "json";
 +}
 +
 +static const AVClass json_class = {
 +    "JSONContext",
 +    json_get_name,
 +    json_options
 +};
 +
 +static av_cold int json_init(WriterContext *wctx, const char *args, void *opaque)
 +{
 +    JSONContext *json = wctx->priv;
 +    int err;
 +
 +    json->class = &json_class;
 +    av_opt_set_defaults(json);
 +
 +    if (args &&
 +        (err = (av_set_options_string(json, args, "=", ":"))) < 0) {
 +        av_log(wctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
 +        return err;
 +    }
 +
 +    json->item_sep       = json->compact ? ", " : ",\n";
 +    json->item_start_end = json->compact ? " "  : "\n";
 +
 +    return 0;
 +}
 +
 +static const char *json_escape_str(AVBPrint *dst, const char *src, void *log_ctx)
 +{
 +    static const char json_escape[] = {'"', '\\', '\b', '\f', '\n', '\r', '\t', 0};
 +    static const char json_subst[]  = {'"', '\\',  'b',  'f',  'n',  'r',  't', 0};
 +    const char *p;
 +
 +    for (p = src; *p; p++) {
 +        char *s = strchr(json_escape, *p);
 +        if (s) {
 +            av_bprint_chars(dst, '\\', 1);
 +            av_bprint_chars(dst, json_subst[s - json_escape], 1);
 +        } else if ((unsigned char)*p < 32) {
 +            av_bprintf(dst, "\\u00%02x", *p & 0xff);
 +        } else {
 +            av_bprint_chars(dst, *p, 1);
 +        }
 +    }
 +    return dst->str;
 +}
 +
 +static void json_print_header(WriterContext *wctx)
 +{
 +    JSONContext *json = wctx->priv;
 +    printf("{");
 +    json->indent_level++;
 +}
 +
 +static void json_print_footer(WriterContext *wctx)
 +{
 +    JSONContext *json = wctx->priv;
 +    json->indent_level--;
 +    printf("\n}\n");
 +}
 +
 +#define JSON_INDENT() printf("%*c", json->indent_level * 4, ' ')
 +
 +static void json_print_chapter_header(WriterContext *wctx, const char *chapter)
 +{
 +    JSONContext *json = wctx->priv;
 +    AVBPrint buf;
 +
 +    if (wctx->nb_chapter)
 +        printf(",");
 +    printf("\n");
 +    json->multiple_entries = !strcmp(chapter, "packets") || !strcmp(chapter, "frames" ) ||
 +                             !strcmp(chapter, "packets_and_frames") ||
 +                             !strcmp(chapter, "streams") || !strcmp(chapter, "library_versions");
 +    if (json->multiple_entries) {
 +        JSON_INDENT();
 +        av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +        printf("\"%s\": [\n", json_escape_str(&buf, chapter, wctx));
 +        av_bprint_finalize(&buf, NULL);
 +        json->print_packets_and_frames = !strcmp(chapter, "packets_and_frames");
 +        json->indent_level++;
 +    }
 +}
 +
 +static void json_print_chapter_footer(WriterContext *wctx, const char *chapter)
 +{
 +    JSONContext *json = wctx->priv;
 +
 +    if (json->multiple_entries) {
 +        printf("\n");
 +        json->indent_level--;
 +        JSON_INDENT();
 +        printf("]");
 +    }
 +}
 +
 +static void json_print_section_header(WriterContext *wctx, const char *section)
 +{
 +    JSONContext *json = wctx->priv;
 +
 +    if (wctx->nb_section)
 +        printf(",\n");
 +    JSON_INDENT();
 +    if (!json->multiple_entries)
 +        printf("\"%s\": ", section);
 +    printf("{%s", json->item_start_end);
 +    json->indent_level++;
 +    /* this is required so the parser can distinguish between packets and frames */
 +    if (json->print_packets_and_frames) {
 +        if (!json->compact)
 +            JSON_INDENT();
 +        printf("\"type\": \"%s\"%s", section, json->item_sep);
 +    }
 +}
 +
 +static void json_print_section_footer(WriterContext *wctx, const char *section)
 +{
 +    JSONContext *json = wctx->priv;
 +
 +    printf("%s", json->item_start_end);
 +    json->indent_level--;
 +    if (!json->compact)
 +        JSON_INDENT();
 +    printf("}");
 +}
 +
 +static inline void json_print_item_str(WriterContext *wctx,
 +                                       const char *key, const char *value)
 +{
 +    AVBPrint buf;
 +
 +    av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +    printf("\"%s\":", json_escape_str(&buf, key,   wctx));
 +    av_bprint_finalize(&buf, NULL);
 +
 +    av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +    printf(" \"%s\"", json_escape_str(&buf, value, wctx));
 +    av_bprint_finalize(&buf, NULL);
 +}
 +
 +static void json_print_str(WriterContext *wctx, const char *key, const char *value)
 +{
 +    JSONContext *json = wctx->priv;
 +
 +    if (wctx->nb_item) printf("%s", json->item_sep);
 +    if (!json->compact)
 +        JSON_INDENT();
 +    json_print_item_str(wctx, key, value);
 +}
 +
 +static void json_print_int(WriterContext *wctx, const char *key, long long int value)
 +{
 +    JSONContext *json = wctx->priv;
 +    AVBPrint buf;
 +
 +    if (wctx->nb_item) printf("%s", json->item_sep);
 +    if (!json->compact)
 +        JSON_INDENT();
 +
 +    av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +    printf("\"%s\": %lld", json_escape_str(&buf, key, wctx), value);
 +    av_bprint_finalize(&buf, NULL);
 +}
 +
 +static void json_show_tags(WriterContext *wctx, AVDictionary *dict)
 +{
 +    JSONContext *json = wctx->priv;
 +    AVDictionaryEntry *tag = NULL;
 +    int is_first = 1;
 +    if (!dict)
 +        return;
 +    printf("%s", json->item_sep);
 +    if (!json->compact)
 +        JSON_INDENT();
 +    printf("\"tags\": {%s", json->item_start_end);
 +    json->indent_level++;
 +    while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
 +        if (is_first) is_first = 0;
 +        else          printf("%s", json->item_sep);
 +        if (!json->compact)
 +            JSON_INDENT();
 +        json_print_item_str(wctx, tag->key, tag->value);
 +    }
 +    json->indent_level--;
 +    printf("%s", json->item_start_end);
 +    if (!json->compact)
 +        JSON_INDENT();
 +    printf("}");
 +}
 +
 +static const Writer json_writer = {
 +    .name                 = "json",
 +    .priv_size            = sizeof(JSONContext),
 +    .init                 = json_init,
 +    .print_header         = json_print_header,
 +    .print_footer         = json_print_footer,
 +    .print_chapter_header = json_print_chapter_header,
 +    .print_chapter_footer = json_print_chapter_footer,
 +    .print_section_header = json_print_section_header,
 +    .print_section_footer = json_print_section_footer,
 +    .print_integer        = json_print_int,
 +    .print_string         = json_print_str,
 +    .show_tags            = json_show_tags,
 +    .flags = WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER,
 +};
 +
 +/* XML output */
 +
 +typedef struct {
 +    const AVClass *class;
 +    int within_tag;
 +    int multiple_entries; ///< tells if the given chapter requires multiple entries
 +    int indent_level;
 +    int fully_qualified;
 +    int xsd_strict;
 +} XMLContext;
 +
 +#undef OFFSET
 +#define OFFSET(x) offsetof(XMLContext, x)
 +
 +static const AVOption xml_options[] = {
 +    {"fully_qualified", "specify if the output should be fully qualified", OFFSET(fully_qualified), AV_OPT_TYPE_INT, {.dbl=0},  0, 1 },
 +    {"q",               "specify if the output should be fully qualified", OFFSET(fully_qualified), AV_OPT_TYPE_INT, {.dbl=0},  0, 1 },
 +    {"xsd_strict",      "ensure that the output is XSD compliant",         OFFSET(xsd_strict),      AV_OPT_TYPE_INT, {.dbl=0},  0, 1 },
 +    {"x",               "ensure that the output is XSD compliant",         OFFSET(xsd_strict),      AV_OPT_TYPE_INT, {.dbl=0},  0, 1 },
 +    {NULL},
 +};
 +
 +static const char *xml_get_name(void *ctx)
 +{
 +    return "xml";
 +}
 +
 +static const AVClass xml_class = {
 +    "XMLContext",
 +    xml_get_name,
 +    xml_options
 +};
 +
 +static av_cold int xml_init(WriterContext *wctx, const char *args, void *opaque)
 +{
 +    XMLContext *xml = wctx->priv;
 +    int err;
 +
 +    xml->class = &xml_class;
 +    av_opt_set_defaults(xml);
 +
 +    if (args &&
 +        (err = (av_set_options_string(xml, args, "=", ":"))) < 0) {
 +        av_log(wctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
 +        return err;
 +    }
 +
 +    if (xml->xsd_strict) {
 +        xml->fully_qualified = 1;
 +#define CHECK_COMPLIANCE(opt, opt_name)                                 \
 +        if (opt) {                                                      \
 +            av_log(wctx, AV_LOG_ERROR,                                  \
 +                   "XSD-compliant output selected, but option '%s' is also enabled; XML output may be non-compliant.\n" \
 +                   "You need to disable that option with '-no%s'\n", opt_name, opt_name); \
 +            return AVERROR(EINVAL);                                     \
 +        }
 +        CHECK_COMPLIANCE(show_private_data, "private");
 +        CHECK_COMPLIANCE(show_value_unit,   "unit");
 +        CHECK_COMPLIANCE(use_value_prefix,  "prefix");
 +
 +        if (do_show_frames && do_show_packets) {
 +            av_log(wctx, AV_LOG_ERROR,
 +                   "Interleaved frames and packets are not allowed in XSD. "
 +                   "Select only one between the -show_frames and the -show_packets options.\n");
 +            return AVERROR(EINVAL);
 +        }
 +    }
 +
 +    return 0;
 +}
 +
 +static const char *xml_escape_str(AVBPrint *dst, const char *src, void *log_ctx)
 +{
 +    const char *p;
 +
 +    for (p = src; *p; p++) {
 +        switch (*p) {
 +        case '&' : av_bprintf(dst, "%s", "&amp;");  break;
 +        case '<' : av_bprintf(dst, "%s", "&lt;");   break;
 +        case '>' : av_bprintf(dst, "%s", "&gt;");   break;
 +        case '\"': av_bprintf(dst, "%s", "&quot;"); break;
 +        case '\'': av_bprintf(dst, "%s", "&apos;"); break;
 +        default: av_bprint_chars(dst, *p, 1);
 +        }
 +    }
 +
 +    return dst->str;
 +}
 +
 +static void xml_print_header(WriterContext *wctx)
 +{
 +    XMLContext *xml = wctx->priv;
 +    const char *qual = " xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' "
 +        "xmlns:ffprobe='http://www.ffmpeg.org/schema/ffprobe' "
 +        "xsi:schemaLocation='http://www.ffmpeg.org/schema/ffprobe ffprobe.xsd'";
 +
 +    printf("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
 +    printf("<%sffprobe%s>\n",
 +           xml->fully_qualified ? "ffprobe:" : "",
 +           xml->fully_qualified ? qual : "");
 +
 +    xml->indent_level++;
 +}
 +
 +static void xml_print_footer(WriterContext *wctx)
 +{
 +    XMLContext *xml = wctx->priv;
 +
 +    xml->indent_level--;
 +    printf("</%sffprobe>\n", xml->fully_qualified ? "ffprobe:" : "");
 +}
 +
 +#define XML_INDENT() printf("%*c", xml->indent_level * 4, ' ')
 +
 +static void xml_print_chapter_header(WriterContext *wctx, const char *chapter)
 +{
 +    XMLContext *xml = wctx->priv;
 +
 +    if (wctx->nb_chapter)
 +        printf("\n");
 +    xml->multiple_entries = !strcmp(chapter, "packets") || !strcmp(chapter, "frames") ||
 +                            !strcmp(chapter, "packets_and_frames") ||
 +                            !strcmp(chapter, "streams") || !strcmp(chapter, "library_versions");
 +
 +    if (xml->multiple_entries) {
 +        XML_INDENT(); printf("<%s>\n", chapter);
 +        xml->indent_level++;
 +    }
 +}
 +
 +static void xml_print_chapter_footer(WriterContext *wctx, const char *chapter)
 +{
 +    XMLContext *xml = wctx->priv;
 +
 +    if (xml->multiple_entries) {
 +        xml->indent_level--;
 +        XML_INDENT(); printf("</%s>\n", chapter);
 +    }
 +}
 +
 +static void xml_print_section_header(WriterContext *wctx, const char *section)
 +{
 +    XMLContext *xml = wctx->priv;
 +
 +    XML_INDENT(); printf("<%s ", section);
 +    xml->within_tag = 1;
 +}
 +
 +static void xml_print_section_footer(WriterContext *wctx, const char *section)
 +{
 +    XMLContext *xml = wctx->priv;
 +
 +    if (xml->within_tag)
 +        printf("/>\n");
 +    else {
 +        XML_INDENT(); printf("</%s>\n", section);
 +    }
 +}
 +
 +static void xml_print_str(WriterContext *wctx, const char *key, const char *value)
 +{
 +    AVBPrint buf;
 +
 +    if (wctx->nb_item)
 +        printf(" ");
 +    av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +    printf("%s=\"%s\"", key, xml_escape_str(&buf, value, wctx));
 +    av_bprint_finalize(&buf, NULL);
 +}
 +
 +static void xml_print_int(WriterContext *wctx, const char *key, long long int value)
 +{
 +    if (wctx->nb_item)
 +        printf(" ");
 +    printf("%s=\"%lld\"", key, value);
 +}
 +
 +static void xml_show_tags(WriterContext *wctx, AVDictionary *dict)
 +{
 +    XMLContext *xml = wctx->priv;
 +    AVDictionaryEntry *tag = NULL;
 +    int is_first = 1;
 +    AVBPrint buf;
 +
 +    xml->indent_level++;
 +    while ((tag = av_dict_get(dict, "", tag, AV_DICT_IGNORE_SUFFIX))) {
 +        if (is_first) {
 +            /* close section tag */
 +            printf(">\n");
 +            xml->within_tag = 0;
 +            is_first = 0;
 +        }
 +        XML_INDENT();
 +
 +        av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +        printf("<tag key=\"%s\"", xml_escape_str(&buf, tag->key, wctx));
 +        av_bprint_finalize(&buf, NULL);
 +
 +        av_bprint_init(&buf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +        printf(" value=\"%s\"/>\n", xml_escape_str(&buf, tag->value, wctx));
 +        av_bprint_finalize(&buf, NULL);
 +    }
 +    xml->indent_level--;
 +}
 +
 +static Writer xml_writer = {
 +    .name                 = "xml",
 +    .priv_size            = sizeof(XMLContext),
 +    .init                 = xml_init,
 +    .print_header         = xml_print_header,
 +    .print_footer         = xml_print_footer,
 +    .print_chapter_header = xml_print_chapter_header,
 +    .print_chapter_footer = xml_print_chapter_footer,
 +    .print_section_header = xml_print_section_header,
 +    .print_section_footer = xml_print_section_footer,
 +    .print_integer        = xml_print_int,
 +    .print_string         = xml_print_str,
 +    .show_tags            = xml_show_tags,
 +    .flags = WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER,
 +};
 +
 +static void writer_register_all(void)
 +{
 +    static int initialized;
 +
 +    if (initialized)
 +        return;
 +    initialized = 1;
 +
 +    writer_register(&default_writer);
 +    writer_register(&compact_writer);
 +    writer_register(&csv_writer);
 +    writer_register(&json_writer);
 +    writer_register(&xml_writer);
 +}
 +
 +#define print_fmt(k, f, ...) do {              \
 +    av_bprint_clear(&pbuf);                    \
 +    av_bprintf(&pbuf, f, __VA_ARGS__);         \
 +    writer_print_string(w, k, pbuf.str, 0);    \
 +} while (0)
 +
 +#define print_int(k, v)         writer_print_integer(w, k, v)
 +#define print_str(k, v)         writer_print_string(w, k, v, 0)
 +#define print_str_opt(k, v)     writer_print_string(w, k, v, 1)
 +#define print_time(k, v, tb)    writer_print_time(w, k, v, tb)
 +#define print_ts(k, v)          writer_print_ts(w, k, v)
 +#define print_val(k, v, u)      writer_print_string(w, k, \
 +    value_string(val_str, sizeof(val_str), (struct unit_value){.val.i = v, .unit=u}), 0)
 +#define print_section_header(s) writer_print_section_header(w, s)
 +#define print_section_footer(s) writer_print_section_footer(w, s)
 +#define show_tags(metadata)     writer_show_tags(w, metadata)
 +
 +static void show_packet(WriterContext *w, AVFormatContext *fmt_ctx, AVPacket *pkt, int packet_idx)
 +{
 +    char val_str[128];
 +    AVStream *st = fmt_ctx->streams[pkt->stream_index];
 +    AVBPrint pbuf;
 +    const char *s;
 +
 +    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +
 +    print_section_header("packet");
 +    s = av_get_media_type_string(st->codec->codec_type);
 +    if (s) print_str    ("codec_type", s);
 +    else   print_str_opt("codec_type", "unknown");
 +    print_int("stream_index",     pkt->stream_index);
 +    print_ts  ("pts",             pkt->pts);
 +    print_time("pts_time",        pkt->pts, &st->time_base);
 +    print_ts  ("dts",             pkt->dts);
 +    print_time("dts_time",        pkt->dts, &st->time_base);
 +    print_ts  ("duration",        pkt->duration);
 +    print_time("duration_time",   pkt->duration, &st->time_base);
 +    print_val("size",             pkt->size, unit_byte_str);
 +    if (pkt->pos != -1) print_fmt    ("pos", "%"PRId64, pkt->pos);
 +    else                print_str_opt("pos", "N/A");
 +    print_fmt("flags", "%c",      pkt->flags & AV_PKT_FLAG_KEY ? 'K' : '_');
 +    print_section_footer("packet");
 +
 +    av_bprint_finalize(&pbuf, NULL);
 +    fflush(stdout);
 +}
 +
 +static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream)
 +{
 +    AVBPrint pbuf;
 +    const char *s;
 +
 +    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +
 +    print_section_header("frame");
 +
 +    s = av_get_media_type_string(stream->codec->codec_type);
 +    if (s) print_str    ("media_type", s);
 +    else   print_str_opt("media_type", "unknown");
 +    print_int("key_frame",              frame->key_frame);
 +    print_ts  ("pkt_pts",               frame->pkt_pts);
 +    print_time("pkt_pts_time",          frame->pkt_pts, &stream->time_base);
 +    print_ts  ("pkt_dts",               frame->pkt_dts);
 +    print_time("pkt_dts_time",          frame->pkt_dts, &stream->time_base);
 +    if (frame->pkt_pos != -1) print_fmt    ("pkt_pos", "%"PRId64, frame->pkt_pos);
 +    else                      print_str_opt("pkt_pos", "N/A");
 +
 +    switch (stream->codec->codec_type) {
 +    case AVMEDIA_TYPE_VIDEO:
 +        print_int("width",                  frame->width);
 +        print_int("height",                 frame->height);
 +        s = av_get_pix_fmt_name(frame->format);
 +        if (s) print_str    ("pix_fmt", s);
 +        else   print_str_opt("pix_fmt", "unknown");
 +        if (frame->sample_aspect_ratio.num) {
 +            print_fmt("sample_aspect_ratio", "%d:%d",
 +                      frame->sample_aspect_ratio.num,
 +                      frame->sample_aspect_ratio.den);
 +        } else {
 +            print_str_opt("sample_aspect_ratio", "N/A");
 +        }
 +        print_fmt("pict_type",              "%c", av_get_picture_type_char(frame->pict_type));
 +        print_int("coded_picture_number",   frame->coded_picture_number);
 +        print_int("display_picture_number", frame->display_picture_number);
 +        print_int("interlaced_frame",       frame->interlaced_frame);
 +        print_int("top_field_first",        frame->top_field_first);
 +        print_int("repeat_pict",            frame->repeat_pict);
 +        print_int("reference",              frame->reference);
 +        break;
 +
 +    case AVMEDIA_TYPE_AUDIO:
 +        s = av_get_sample_fmt_name(frame->format);
 +        if (s) print_str    ("sample_fmt", s);
 +        else   print_str_opt("sample_fmt", "unknown");
 +        print_int("nb_samples",         frame->nb_samples);
 +        break;
 +    }
 +
 +    print_section_footer("frame");
 +
 +    av_bprint_finalize(&pbuf, NULL);
 +    fflush(stdout);
 +}
 +
 +static av_always_inline int get_decoded_frame(AVFormatContext *fmt_ctx,
 +                                              AVFrame *frame, int *got_frame,
 +                                              AVPacket *pkt)
 +{
 +    AVCodecContext *dec_ctx = fmt_ctx->streams[pkt->stream_index]->codec;
 +    int ret = 0;
 +
 +    *got_frame = 0;
 +    switch (dec_ctx->codec_type) {
 +    case AVMEDIA_TYPE_VIDEO:
 +        ret = avcodec_decode_video2(dec_ctx, frame, got_frame, pkt);
 +        break;
 +
 +    case AVMEDIA_TYPE_AUDIO:
 +        ret = avcodec_decode_audio4(dec_ctx, frame, got_frame, pkt);
 +        break;
 +    }
 +
 +    return ret;
 +}
 +
 +static void read_packets(WriterContext *w, AVFormatContext *fmt_ctx)
 +{
 +    AVPacket pkt, pkt1;
 +    AVFrame frame;
 +    int i = 0, ret, got_frame;
 +
 +    av_init_packet(&pkt);
 +
 +    while (!av_read_frame(fmt_ctx, &pkt)) {
 +        if (do_read_packets) {
 +            if (do_show_packets)
 +                show_packet(w, fmt_ctx, &pkt, i++);
 +            nb_streams_packets[pkt.stream_index]++;
 +        }
 +        if (do_read_frames) {
 +            pkt1 = pkt;
 +            while (pkt1.size) {
 +                avcodec_get_frame_defaults(&frame);
 +                ret = get_decoded_frame(fmt_ctx, &frame, &got_frame, &pkt1);
 +                if (ret < 0 || !got_frame)
 +                    break;
 +                if (do_show_frames)
 +                    show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
 +                pkt1.data += ret;
 +                pkt1.size -= ret;
 +                nb_streams_frames[pkt.stream_index]++;
 +            }
 +        }
 +        av_free_packet(&pkt);
 +    }
 +    av_init_packet(&pkt);
 +    pkt.data = NULL;
 +    pkt.size = 0;
 +    // Flush remaining frames that are cached in the decoder
 +    for (i = 0; i < fmt_ctx->nb_streams; i++) {
 +        pkt.stream_index = i;
 +        while (get_decoded_frame(fmt_ctx, &frame, &got_frame, &pkt) >= 0 && got_frame) {
 +            if (do_read_frames) {
 +                if (do_show_frames)
 +                    show_frame(w, &frame, fmt_ctx->streams[pkt.stream_index]);
 +                nb_streams_frames[pkt.stream_index]++;
 +            }
 +        }
 +    }
 +}
 +
 +static void show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_idx)
 +{
 +    AVStream *stream = fmt_ctx->streams[stream_idx];
 +    AVCodecContext *dec_ctx;
 +    AVCodec *dec;
 +    char val_str[128];
 +    const char *s;
 +    AVRational display_aspect_ratio;
 +    AVBPrint pbuf;
 +
 +    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +
 +    print_section_header("stream");
 +
 +    print_int("index", stream->index);
 +
 +    if ((dec_ctx = stream->codec)) {
 +        if ((dec = dec_ctx->codec)) {
 +            print_str("codec_name",      dec->name);
 +            print_str("codec_long_name", dec->long_name);
 +        } else {
 +            print_str_opt("codec_name",      "unknown");
 +            print_str_opt("codec_long_name", "unknown");
 +        }
 +
 +        s = av_get_media_type_string(dec_ctx->codec_type);
 +        if (s) print_str    ("codec_type", s);
 +        else   print_str_opt("codec_type", "unknown");
 +        print_fmt("codec_time_base", "%d/%d", dec_ctx->time_base.num, dec_ctx->time_base.den);
 +
 +        /* print AVI/FourCC tag */
 +        av_get_codec_tag_string(val_str, sizeof(val_str), dec_ctx->codec_tag);
 +        print_str("codec_tag_string",    val_str);
 +        print_fmt("codec_tag", "0x%04x", dec_ctx->codec_tag);
 +
 +        switch (dec_ctx->codec_type) {
 +        case AVMEDIA_TYPE_VIDEO:
 +            print_int("width",        dec_ctx->width);
 +            print_int("height",       dec_ctx->height);
 +            print_int("has_b_frames", dec_ctx->has_b_frames);
 +            if (dec_ctx->sample_aspect_ratio.num) {
 +                print_fmt("sample_aspect_ratio", "%d:%d",
 +                          dec_ctx->sample_aspect_ratio.num,
 +                          dec_ctx->sample_aspect_ratio.den);
 +                av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
 +                          dec_ctx->width  * dec_ctx->sample_aspect_ratio.num,
 +                          dec_ctx->height * dec_ctx->sample_aspect_ratio.den,
 +                          1024*1024);
 +                print_fmt("display_aspect_ratio", "%d:%d",
 +                          display_aspect_ratio.num,
 +                          display_aspect_ratio.den);
 +            } else {
 +                print_str_opt("sample_aspect_ratio", "N/A");
 +                print_str_opt("display_aspect_ratio", "N/A");
 +            }
 +            s = av_get_pix_fmt_name(dec_ctx->pix_fmt);
 +            if (s) print_str    ("pix_fmt", s);
 +            else   print_str_opt("pix_fmt", "unknown");
 +            print_int("level",   dec_ctx->level);
 +            if (dec_ctx->timecode_frame_start >= 0) {
 +                char tcbuf[AV_TIMECODE_STR_SIZE];
 +                av_timecode_make_mpeg_tc_string(tcbuf, dec_ctx->timecode_frame_start);
 +                print_str("timecode", tcbuf);
 +            } else {
 +                print_str_opt("timecode", "N/A");
 +            }
 +            break;
 +
 +        case AVMEDIA_TYPE_AUDIO:
 +            s = av_get_sample_fmt_name(dec_ctx->sample_fmt);
 +            if (s) print_str    ("sample_fmt", s);
 +            else   print_str_opt("sample_fmt", "unknown");
 +            print_val("sample_rate",     dec_ctx->sample_rate, unit_hertz_str);
 +            print_int("channels",        dec_ctx->channels);
 +            print_int("bits_per_sample", av_get_bits_per_sample(dec_ctx->codec_id));
 +            break;
 +        }
 +    } else {
 +        print_str_opt("codec_type", "unknown");
 +    }
 +    if (dec_ctx->codec && dec_ctx->codec->priv_class && show_private_data) {
 +        const AVOption *opt = NULL;
 +        while ((opt = av_opt_next(dec_ctx->priv_data, opt))) {
 +            uint8_t *str;
 +            if (opt->flags) continue;
 +            if (av_opt_get(dec_ctx->priv_data, opt->name, 0, &str) >= 0) {
 +                print_str(opt->name, str);
 +                av_free(str);
 +            }
 +        }
 +    }
 +
 +    if (fmt_ctx->iformat->flags & AVFMT_SHOW_IDS) print_fmt    ("id", "0x%x", stream->id);
 +    else                                          print_str_opt("id", "N/A");
 +    print_fmt("r_frame_rate",   "%d/%d", stream->r_frame_rate.num,   stream->r_frame_rate.den);
 +    print_fmt("avg_frame_rate", "%d/%d", stream->avg_frame_rate.num, stream->avg_frame_rate.den);
 +    print_fmt("time_base",      "%d/%d", stream->time_base.num,      stream->time_base.den);
 +    print_time("start_time",    stream->start_time, &stream->time_base);
 +    print_time("duration",      stream->duration,   &stream->time_base);
 +    if (dec_ctx->bit_rate > 0) print_val    ("bit_rate", dec_ctx->bit_rate, unit_bit_per_second_str);
 +    else                       print_str_opt("bit_rate", "N/A");
 +    if (stream->nb_frames) print_fmt    ("nb_frames", "%"PRId64, stream->nb_frames);
 +    else                   print_str_opt("nb_frames", "N/A");
 +    if (nb_streams_frames[stream_idx])  print_fmt    ("nb_read_frames", "%"PRIu64, nb_streams_frames[stream_idx]);
 +    else                                print_str_opt("nb_read_frames", "N/A");
 +    if (nb_streams_packets[stream_idx]) print_fmt    ("nb_read_packets", "%"PRIu64, nb_streams_packets[stream_idx]);
 +    else                                print_str_opt("nb_read_packets", "N/A");
 +    show_tags(stream->metadata);
 +
 +    print_section_footer("stream");
 +    av_bprint_finalize(&pbuf, NULL);
 +    fflush(stdout);
 +}
 +
 +static void show_streams(WriterContext *w, AVFormatContext *fmt_ctx)
 +{
 +    int i;
 +    for (i = 0; i < fmt_ctx->nb_streams; i++)
 +        show_stream(w, fmt_ctx, i);
 +}
 +
 +static void show_format(WriterContext *w, AVFormatContext *fmt_ctx)
 +{
 +    char val_str[128];
 +    int64_t size = fmt_ctx->pb ? avio_size(fmt_ctx->pb) : -1;
 +
 +    print_section_header("format");
 +    print_str("filename",         fmt_ctx->filename);
 +    print_int("nb_streams",       fmt_ctx->nb_streams);
 +    print_str("format_name",      fmt_ctx->iformat->name);
 +    print_str("format_long_name", fmt_ctx->iformat->long_name);
 +    print_time("start_time",      fmt_ctx->start_time, &AV_TIME_BASE_Q);
 +    print_time("duration",        fmt_ctx->duration,   &AV_TIME_BASE_Q);
 +    if (size >= 0) print_val    ("size", size, unit_byte_str);
 +    else           print_str_opt("size", "N/A");
 +    if (fmt_ctx->bit_rate > 0) print_val    ("bit_rate", fmt_ctx->bit_rate, unit_bit_per_second_str);
 +    else                       print_str_opt("bit_rate", "N/A");
 +    show_tags(fmt_ctx->metadata);
 +    print_section_footer("format");
 +    fflush(stdout);
 +}
 +
 +static void show_error(WriterContext *w, int err)
 +{
 +    char errbuf[128];
 +    const char *errbuf_ptr = errbuf;
 +
 +    if (av_strerror(err, errbuf, sizeof(errbuf)) < 0)
 +        errbuf_ptr = strerror(AVUNERROR(err));
 +
 +    writer_print_chapter_header(w, "error");
 +    print_section_header("error");
 +    print_int("code", err);
 +    print_str("string", errbuf_ptr);
 +    print_section_footer("error");
 +    writer_print_chapter_footer(w, "error");
 +}
 +
 +static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
 +{
 +    int err, i;
 +    AVFormatContext *fmt_ctx = NULL;
 +    AVDictionaryEntry *t;
 +
 +    if ((err = avformat_open_input(&fmt_ctx, filename,
 +                                   iformat, &format_opts)) < 0) {
 +        print_error(filename, err);
 +        return err;
 +    }
 +    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
 +        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
 +        return AVERROR_OPTION_NOT_FOUND;
 +    }
 +
 +
 +    /* fill the streams in the format context */
 +    if ((err = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
 +        print_error(filename, err);
 +        return err;
 +    }
 +
 +    av_dump_format(fmt_ctx, 0, filename, 0);
 +
 +    /* bind a decoder to each input stream */
 +    for (i = 0; i < fmt_ctx->nb_streams; i++) {
 +        AVStream *stream = fmt_ctx->streams[i];
 +        AVCodec *codec;
 +
 +        if (!(codec = avcodec_find_decoder(stream->codec->codec_id))) {
 +            av_log(NULL, AV_LOG_ERROR,
 +                    "Unsupported codec with id %d for input stream %d\n",
 +                    stream->codec->codec_id, stream->index);
 +        } else if (avcodec_open2(stream->codec, codec, NULL) < 0) {
 +            av_log(NULL, AV_LOG_ERROR, "Error while opening codec for input stream %d\n",
 +                   stream->index);
 +        }
 +    }
 +
 +    *fmt_ctx_ptr = fmt_ctx;
 +    return 0;
 +}
 +
++static void close_input_file(AVFormatContext **ctx_ptr)
++{
++    int i;
++    AVFormatContext *fmt_ctx = *ctx_ptr;
++
++    /* close decoder for each stream */
++    for (i = 0; i < fmt_ctx->nb_streams; i++)
++        if (fmt_ctx->streams[i]->codec->codec_id != CODEC_ID_NONE)
++            avcodec_close(fmt_ctx->streams[i]->codec);
++
++    avformat_close_input(ctx_ptr);
++}
++
 +#define PRINT_CHAPTER(name) do {                                        \
 +    if (do_show_ ## name) {                                             \
 +        writer_print_chapter_header(wctx, #name);                       \
 +        show_ ## name (wctx, fmt_ctx);                                  \
 +        writer_print_chapter_footer(wctx, #name);                       \
 +    }                                                                   \
 +} while (0)
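Since the macro pastes the chapter name into both an identifier and a string literal, a statement like PRINT_CHAPTER(streams); in probe_file() below expands (up to whitespace) to:

    do {
        if (do_show_streams) {
            writer_print_chapter_header(wctx, "streams");
            show_streams(wctx, fmt_ctx);
            writer_print_chapter_footer(wctx, "streams");
        }
    } while (0);

which is why it can only be used where wctx and fmt_ctx are in scope.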
 +
 +static int probe_file(WriterContext *wctx, const char *filename)
 +{
 +    AVFormatContext *fmt_ctx;
 +    int ret, i;
 +
 +    do_read_frames = do_show_frames || do_count_frames;
 +    do_read_packets = do_show_packets || do_count_packets;
 +
 +    ret = open_input_file(&fmt_ctx, filename);
 +    if (ret >= 0) {
 +        nb_streams_frames  = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_frames));
 +        nb_streams_packets = av_calloc(fmt_ctx->nb_streams, sizeof(*nb_streams_packets));
 +        if (do_read_frames || do_read_packets) {
 +            const char *chapter;
 +            if (do_show_frames && do_show_packets &&
 +                wctx->writer->flags & WRITER_FLAG_PUT_PACKETS_AND_FRAMES_IN_SAME_CHAPTER)
 +                chapter = "packets_and_frames";
 +            else if (do_show_packets && !do_show_frames)
 +                chapter = "packets";
 +            else // (!do_show_packets && do_show_frames)
 +                chapter = "frames";
 +            if (do_show_frames || do_show_packets)
 +                writer_print_chapter_header(wctx, chapter);
 +            read_packets(wctx, fmt_ctx);
 +            if (do_show_frames || do_show_packets)
 +                writer_print_chapter_footer(wctx, chapter);
 +        }
 +        PRINT_CHAPTER(streams);
 +        PRINT_CHAPTER(format);
++        close_input_file(&fmt_ctx);
 +        av_freep(&nb_streams_frames);
 +        av_freep(&nb_streams_packets);
 +    }
 +    return ret;
 +}
 +
 +static void show_usage(void)
 +{
 +    av_log(NULL, AV_LOG_INFO, "Simple multimedia streams analyzer\n");
 +    av_log(NULL, AV_LOG_INFO, "usage: %s [OPTIONS] [INPUT_FILE]\n", program_name);
 +    av_log(NULL, AV_LOG_INFO, "\n");
 +}
 +
 +static void ffprobe_show_program_version(WriterContext *w)
 +{
 +    AVBPrint pbuf;
 +    av_bprint_init(&pbuf, 1, AV_BPRINT_SIZE_UNLIMITED);
 +
 +    writer_print_chapter_header(w, "program_version");
 +    print_section_header("program_version");
 +    print_str("version", FFMPEG_VERSION);
 +    print_fmt("copyright", "Copyright (c) %d-%d the FFmpeg developers",
 +              program_birth_year, this_year);
 +    print_str("build_date", __DATE__);
 +    print_str("build_time", __TIME__);
 +    print_str("compiler_type", CC_TYPE);
 +    print_str("compiler_version", CC_VERSION);
 +    print_str("configuration", FFMPEG_CONFIGURATION);
 +    print_section_footer("program_version");
 +    writer_print_chapter_footer(w, "program_version");
 +
 +    av_bprint_finalize(&pbuf, NULL);
 +}
 +
 +#define SHOW_LIB_VERSION(libname, LIBNAME)                              \
 +    do {                                                                \
 +        if (CONFIG_##LIBNAME) {                                         \
 +            unsigned int version = libname##_version();                 \
 +            print_section_header("library_version");                    \
 +            print_str("name",    "lib" #libname);                       \
 +            print_int("major",   LIB##LIBNAME##_VERSION_MAJOR);         \
 +            print_int("minor",   LIB##LIBNAME##_VERSION_MINOR);         \
 +            print_int("micro",   LIB##LIBNAME##_VERSION_MICRO);         \
 +            print_int("version", version);                              \
 +            print_section_footer("library_version");                    \
 +        }                                                               \
 +    } while (0)
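For example, SHOW_LIB_VERSION(avutil, AVUTIL); below expands (up to whitespace) to:

    do {
        if (CONFIG_AVUTIL) {
            unsigned int version = avutil_version();
            print_section_header("library_version");
            print_str("name",    "libavutil");
            print_int("major",   LIBAVUTIL_VERSION_MAJOR);
            print_int("minor",   LIBAVUTIL_VERSION_MINOR);
            print_int("micro",   LIBAVUTIL_VERSION_MICRO);
            print_int("version", version);
            print_section_footer("library_version");
        }
    } while (0);

so each library that is compiled in contributes one "library_version" section to the "library_versions" chapter.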
 +
 +static void ffprobe_show_library_versions(WriterContext *w)
 +{
 +    writer_print_chapter_header(w, "library_versions");
 +    SHOW_LIB_VERSION(avutil,     AVUTIL);
 +    SHOW_LIB_VERSION(avcodec,    AVCODEC);
 +    SHOW_LIB_VERSION(avformat,   AVFORMAT);
 +    SHOW_LIB_VERSION(avdevice,   AVDEVICE);
 +    SHOW_LIB_VERSION(avfilter,   AVFILTER);
 +    SHOW_LIB_VERSION(swscale,    SWSCALE);
 +    SHOW_LIB_VERSION(swresample, SWRESAMPLE);
 +    SHOW_LIB_VERSION(postproc,   POSTPROC);
 +    writer_print_chapter_footer(w, "library_versions");
 +}
 +
 +static int opt_format(const char *opt, const char *arg)
 +{
 +    iformat = av_find_input_format(arg);
 +    if (!iformat) {
 +        av_log(NULL, AV_LOG_ERROR, "Unknown input format: %s\n", arg);
 +        return AVERROR(EINVAL);
 +    }
 +    return 0;
 +}
 +
 +static int opt_show_format_entry(const char *opt, const char *arg)
 +{
 +    do_show_format = 1;
 +    av_dict_set(&fmt_entries_to_show, arg, "", 0);
 +    return 0;
 +}
 +
 +static void opt_input_file(void *optctx, const char *arg)
 +{
 +    if (input_filename) {
 +        av_log(NULL, AV_LOG_ERROR,
 +                "Argument '%s' provided as input filename, but '%s' was already specified.\n",
 +                arg, input_filename);
 +        exit(1);
 +    }
 +    if (!strcmp(arg, "-"))
 +        arg = "pipe:";
 +    input_filename = arg;
 +}
 +
 +static int opt_help(const char *opt, const char *arg)
 +{
 +    av_log_set_callback(log_callback_help);
 +    show_usage();
 +    show_help_options(options, "Main options:\n", 0, 0);
 +    printf("\n");
 +
 +    show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
 +
 +    return 0;
 +}
 +
 +static int opt_pretty(const char *opt, const char *arg)
 +{
 +    show_value_unit              = 1;
 +    use_value_prefix             = 1;
 +    use_byte_value_binary_prefix = 1;
 +    use_value_sexagesimal_format = 1;
 +    return 0;
 +}
 +
 +static int opt_show_versions(const char *opt, const char *arg)
 +{
 +    do_show_program_version  = 1;
 +    do_show_library_versions = 1;
 +    return 0;
 +}
 +
 +static const OptionDef options[] = {
 +#include "cmdutils_common_opts.h"
 +    { "f", HAS_ARG, {(void*)opt_format}, "force format", "format" },
 +    { "unit", OPT_BOOL, {(void*)&show_value_unit}, "show unit of the displayed values" },
 +    { "prefix", OPT_BOOL, {(void*)&use_value_prefix}, "use SI prefixes for the displayed values" },
 +    { "byte_binary_prefix", OPT_BOOL, {(void*)&use_byte_value_binary_prefix},
 +      "use binary prefixes for byte units" },
 +    { "sexagesimal", OPT_BOOL,  {(void*)&use_value_sexagesimal_format},
 +      "use sexagesimal format HOURS:MM:SS.MICROSECONDS for time units" },
 +    { "pretty", 0, {(void*)&opt_pretty},
 +      "prettify the format of displayed values, make it more human readable" },
 +    { "print_format", OPT_STRING | HAS_ARG, {(void*)&print_format},
 +      "set the output printing format (available formats are: default, compact, csv, json, xml)", "format" },
 +    { "show_error",   OPT_BOOL, {(void*)&do_show_error} ,  "show probing error" },
 +    { "show_format",  OPT_BOOL, {(void*)&do_show_format} , "show format/container info" },
 +    { "show_frames",  OPT_BOOL, {(void*)&do_show_frames} , "show frames info" },
 +    { "show_format_entry", HAS_ARG, {(void*)opt_show_format_entry},
 +      "show a particular entry from the format/container info", "entry" },
 +    { "show_packets", OPT_BOOL, {(void*)&do_show_packets}, "show packets info" },
 +    { "show_streams", OPT_BOOL, {(void*)&do_show_streams}, "show streams info" },
 +    { "count_frames", OPT_BOOL, {(void*)&do_count_frames}, "count the number of frames per stream" },
 +    { "count_packets", OPT_BOOL, {(void*)&do_count_packets}, "count the number of packets per stream" },
 +    { "show_program_version",  OPT_BOOL, {(void*)&do_show_program_version},  "show ffprobe version" },
 +    { "show_library_versions", OPT_BOOL, {(void*)&do_show_library_versions}, "show library versions" },
 +    { "show_versions",         0, {(void*)&opt_show_versions}, "show program and library versions" },
 +    { "show_private_data", OPT_BOOL, {(void*)&show_private_data}, "show private data" },
 +    { "private",           OPT_BOOL, {(void*)&show_private_data}, "same as show_private_data" },
 +    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
 +    { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
 +    { NULL, },
 +};
 +
 +int main(int argc, char **argv)
 +{
 +    const Writer *w;
 +    WriterContext *wctx;
 +    char *buf;
 +    char *w_name = NULL, *w_args = NULL;
 +    int ret;
 +
 +    av_log_set_flags(AV_LOG_SKIP_REPEATED);
 +    parse_loglevel(argc, argv, options);
 +    av_register_all();
 +    avformat_network_init();
 +    init_opts();
 +#if CONFIG_AVDEVICE
 +    avdevice_register_all();
 +#endif
 +
 +    show_banner(argc, argv, options);
 +    parse_options(NULL, argc, argv, options, opt_input_file);
 +
 +    writer_register_all();
 +
 +    if (!print_format)
 +        print_format = av_strdup("default");
 +    w_name = av_strtok(print_format, "=", &buf);
 +    w_args = buf;
 +
 +    w = writer_get_by_name(w_name);
 +    if (!w) {
 +        av_log(NULL, AV_LOG_ERROR, "Unknown output format with name '%s'\n", w_name);
 +        ret = AVERROR(EINVAL);
 +        goto end;
 +    }
 +
 +    if ((ret = writer_open(&wctx, w, w_args, NULL)) >= 0) {
 +        writer_print_header(wctx);
 +
 +        if (do_show_program_version)
 +            ffprobe_show_program_version(wctx);
 +        if (do_show_library_versions)
 +            ffprobe_show_library_versions(wctx);
 +
 +        if (!input_filename &&
 +            ((do_show_format || do_show_streams || do_show_packets || do_show_error) ||
 +             (!do_show_program_version && !do_show_library_versions))) {
 +            show_usage();
 +            av_log(NULL, AV_LOG_ERROR, "You have to specify one input file.\n");
 +            av_log(NULL, AV_LOG_ERROR, "Use -h to get full help or, even better, run 'man %s'.\n", program_name);
 +            ret = AVERROR(EINVAL);
 +        } else if (input_filename) {
 +            ret = probe_file(wctx, input_filename);
 +            if (ret < 0 && do_show_error)
 +                show_error(wctx, ret);
 +        }
 +
 +        writer_print_footer(wctx);
 +        writer_close(&wctx);
 +    }
 +
 +end:
 +    av_freep(&print_format);
++
++    uninit_opts();
++    av_dict_free(&fmt_entries_to_show);
++
 +    avformat_network_deinit();
 +
 +    return ret;
 +}
@@@ -76,13 -77,6 +77,7 @@@ static av_cold int mp3lame_encode_init(
      if ((s->gfp = lame_init()) == NULL)
          return AVERROR(ENOMEM);
  
-     /* channels */
-     if (avctx->channels > 2) {
-         av_log(avctx, AV_LOG_ERROR,
-                "Invalid number of channels %d, must be <= 2\n", avctx->channels);
-         ret =  AVERROR(EINVAL);
-         goto error;
-     }
++
      lame_set_num_channels(s->gfp, avctx->channels);
      lame_set_mode(s->gfp, avctx->channels > 1 ? JOINT_STEREO : MONO);
  
@@@ -1,20 -1,10 +1,21 @@@
 +include $(SUBDIR)../config.mak
 +
  NAME = avfilter
  FFLIBS = avutil swscale
 -FFLIBS-$(CONFIG_MOVIE_FILTER) += avformat avcodec
+ FFLIBS-$(CONFIG_ASYNCTS_FILTER) += avresample
  FFLIBS-$(CONFIG_RESAMPLE_FILTER) += avresample
  
 -HEADERS = avfilter.h                                                    \
 +FFLIBS-$(CONFIG_ACONVERT_FILTER)             += swresample
 +FFLIBS-$(CONFIG_AMOVIE_FILTER)               += avformat avcodec
 +FFLIBS-$(CONFIG_ARESAMPLE_FILTER)            += swresample
 +FFLIBS-$(CONFIG_MOVIE_FILTER)                += avformat avcodec
 +FFLIBS-$(CONFIG_PAN_FILTER)                  += swresample
 +FFLIBS-$(CONFIG_REMOVELOGO_FILTER)           += avformat avcodec
 +FFLIBS-$(CONFIG_MP_FILTER)                   += avcodec postproc
 +
 +HEADERS = asrc_abuffer.h                                                \
 +          avcodec.h                                                     \
 +          avfilter.h                                                    \
            avfiltergraph.h                                               \
            buffersink.h                                                  \
            buffersrc.h                                                   \
@@@ -25,47 -15,27 +26,51 @@@ OBJS = allfilters.
         audio.o                                                          \
         avfilter.o                                                       \
         avfiltergraph.o                                                  \
+        buffer.o                                                         \
         buffersink.o                                                     \
 -       buffersrc.o                                                      \
         defaults.o                                                       \
         drawutils.o                                                      \
         formats.o                                                        \
 +       graphdump.o                                                      \
         graphparser.o                                                    \
 +       src_buffer.o                                                     \
 +       transform.o                                                      \
         vf_scale.o                                                       \
+        video.o                                                          \
 +
 +OBJS-$(CONFIG_AVCODEC)                       += avcodec.o
 +OBJS-$(CONFIG_AVFORMAT)                      += lavfutils.o
 +OBJS-$(CONFIG_SWSCALE)                       += lswsutils.o
 +
 +OBJS-$(CONFIG_ACONVERT_FILTER)               += af_aconvert.o
  OBJS-$(CONFIG_AFORMAT_FILTER)                += af_aformat.o
 +OBJS-$(CONFIG_AMERGE_FILTER)                 += af_amerge.o
  OBJS-$(CONFIG_ANULL_FILTER)                  += af_anull.o
 +OBJS-$(CONFIG_ARESAMPLE_FILTER)              += af_aresample.o
 +OBJS-$(CONFIG_ASHOWINFO_FILTER)              += af_ashowinfo.o
 +OBJS-$(CONFIG_ASPLIT_FILTER)                 += af_asplit.o
 +OBJS-$(CONFIG_ASTREAMSYNC_FILTER)            += af_astreamsync.o
+ OBJS-$(CONFIG_ASYNCTS_FILTER)                += af_asyncts.o
 +OBJS-$(CONFIG_EARWAX_FILTER)                 += af_earwax.o
 +OBJS-$(CONFIG_PAN_FILTER)                    += af_pan.o
  OBJS-$(CONFIG_RESAMPLE_FILTER)               += af_resample.o
 +OBJS-$(CONFIG_SILENCEDETECT_FILTER)          += af_silencedetect.o
 +OBJS-$(CONFIG_VOLUME_FILTER)                 += af_volume.o
  
 +OBJS-$(CONFIG_AEVALSRC_FILTER)               += asrc_aevalsrc.o
 +OBJS-$(CONFIG_AMOVIE_FILTER)                 += src_movie.o
  OBJS-$(CONFIG_ANULLSRC_FILTER)               += asrc_anullsrc.o
  
 +OBJS-$(CONFIG_ABUFFERSINK_FILTER)            += sink_buffer.o
  OBJS-$(CONFIG_ANULLSINK_FILTER)              += asink_anullsink.o
  
 +OBJS-$(CONFIG_ASS_FILTER)                    += vf_ass.o
 +OBJS-$(CONFIG_BBOX_FILTER)                   += bbox.o vf_bbox.o
 +OBJS-$(CONFIG_BLACKDETECT_FILTER)            += vf_blackdetect.o
  OBJS-$(CONFIG_BLACKFRAME_FILTER)             += vf_blackframe.o
  OBJS-$(CONFIG_BOXBLUR_FILTER)                += vf_boxblur.o
 +OBJS-$(CONFIG_COLORMATRIX_FILTER)            += vf_colormatrix.o
  OBJS-$(CONFIG_COPY_FILTER)                   += vf_copy.o
  OBJS-$(CONFIG_CROP_FILTER)                   += vf_crop.o
  OBJS-$(CONFIG_CROPDETECT_FILTER)             += vf_cropdetect.o
index 9d420f8,0000000..2b3a330
mode 100644,000000..100644
--- /dev/null
@@@ -1,178 -1,0 +1,179 @@@
-     avfilter_formats_ref(avfilter_make_all_channel_layouts(),
-                          &inlink->out_chlayouts);
 +/*
 + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
 + * Copyright (c) 2011 Stefano Sabatini
 + * Copyright (c) 2011 Mina Nagy Zaki
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * sample format and channel layout conversion audio filter
 + */
 +
 +#include "libavutil/avstring.h"
 +#include "libswresample/swresample.h"
 +#include "avfilter.h"
 +#include "audio.h"
 +#include "internal.h"
 +
 +typedef struct {
 +    enum AVSampleFormat  out_sample_fmt;
 +    int64_t              out_chlayout;
 +    struct SwrContext *swr;
 +} AConvertContext;
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
 +{
 +    AConvertContext *aconvert = ctx->priv;
 +    char *arg, *ptr = NULL;
 +    int ret = 0;
 +    char *args = av_strdup(args0);
 +
 +    aconvert->out_sample_fmt  = AV_SAMPLE_FMT_NONE;
 +    aconvert->out_chlayout    = 0;
 +
 +    if ((arg = av_strtok(args, ":", &ptr)) && strcmp(arg, "auto")) {
 +        if ((ret = ff_parse_sample_format(&aconvert->out_sample_fmt, arg, ctx)) < 0)
 +            goto end;
 +    }
 +    if ((arg = av_strtok(NULL, ":", &ptr)) && strcmp(arg, "auto")) {
 +        if ((ret = ff_parse_channel_layout(&aconvert->out_chlayout, arg, ctx)) < 0)
 +            goto end;
 +    }
 +
 +end:
 +    av_freep(&args);
 +    return ret;
 +}
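In other words, the first two colon-separated fields parsed here are the output sample format and channel layout, and either one may be "auto" (or omitted) to leave it unset until config_output() picks it from the negotiated link; for instance an args string of "s16:stereo" would request signed 16-bit output in stereo, while "auto:5.1" would only force the channel layout (the concrete names are illustrative; they are whatever ff_parse_sample_format() and ff_parse_channel_layout() accept).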
 +
 +static av_cold void uninit(AVFilterContext *ctx)
 +{
 +    AConvertContext *aconvert = ctx->priv;
 +    swr_free(&aconvert->swr);
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    AVFilterFormats *formats = NULL;
 +    AConvertContext *aconvert = ctx->priv;
 +    AVFilterLink *inlink  = ctx->inputs[0];
 +    AVFilterLink *outlink = ctx->outputs[0];
 +    int out_packing = av_sample_fmt_is_planar(aconvert->out_sample_fmt);
++    AVFilterChannelLayouts *layouts;
 +
 +    avfilter_formats_ref(avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO),
 +                         &inlink->out_formats);
 +    if (aconvert->out_sample_fmt != AV_SAMPLE_FMT_NONE) {
 +        formats = NULL;
 +        avfilter_add_format(&formats, aconvert->out_sample_fmt);
 +        avfilter_formats_ref(formats, &outlink->in_formats);
 +    } else
 +        avfilter_formats_ref(avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO),
 +                             &outlink->in_formats);
 +
-         formats = NULL;
-         avfilter_add_format(&formats, aconvert->out_chlayout);
-         avfilter_formats_ref(formats, &outlink->in_chlayouts);
++    ff_channel_layouts_ref(ff_all_channel_layouts(),
++                         &inlink->out_channel_layouts);
 +    if (aconvert->out_chlayout != 0) {
-         avfilter_formats_ref(avfilter_make_all_channel_layouts(),
-                              &outlink->in_chlayouts);
++        layouts = NULL;
++        ff_add_channel_layout(&layouts, aconvert->out_chlayout);
++        ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
 +    } else
++        ff_channel_layouts_ref(ff_all_channel_layouts(),
++                             &outlink->in_channel_layouts);
 +
 +    avfilter_formats_ref(avfilter_make_all_packing_formats(),
 +                         &inlink->out_packing);
 +    formats = NULL;
 +    avfilter_add_format(&formats, out_packing);
 +    avfilter_formats_ref(formats, &outlink->in_packing);
 +
 +    return 0;
 +}
 +
 +static int config_output(AVFilterLink *outlink)
 +{
 +    int ret;
 +    AVFilterContext *ctx = outlink->src;
 +    AVFilterLink *inlink = ctx->inputs[0];
 +    AConvertContext *aconvert = ctx->priv;
 +    char buf1[64], buf2[64];
 +
 +    /* if not specified in args, use the format and layout of the output */
 +    if (aconvert->out_sample_fmt == AV_SAMPLE_FMT_NONE)
 +        aconvert->out_sample_fmt = outlink->format;
 +    if (aconvert->out_chlayout   == 0)
 +        aconvert->out_chlayout   = outlink->channel_layout;
 +
 +    aconvert->swr = swr_alloc_set_opts(aconvert->swr,
 +                                       aconvert->out_chlayout, aconvert->out_sample_fmt, inlink->sample_rate,
 +                                       inlink->channel_layout, inlink->format,           inlink->sample_rate,
 +                                       0, ctx);
 +    if (!aconvert->swr)
 +        return AVERROR(ENOMEM);
 +    ret = swr_init(aconvert->swr);
 +    if (ret < 0)
 +        return ret;
 +
 +    av_get_channel_layout_string(buf1, sizeof(buf1),
 +                                 -1, inlink ->channel_layout);
 +    av_get_channel_layout_string(buf2, sizeof(buf2),
 +                                 -1, outlink->channel_layout);
 +    av_log(ctx, AV_LOG_INFO,
 +           "fmt:%s cl:%s planar:%i -> fmt:%s cl:%s planar:%i\n",
 +           av_get_sample_fmt_name(inlink ->format), buf1, inlink ->planar,
 +           av_get_sample_fmt_name(outlink->format), buf2, outlink->planar);
 +
 +    return 0;
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
 +{
 +    AConvertContext *aconvert = inlink->dst->priv;
 +    const int n = insamplesref->audio->nb_samples;
 +    AVFilterLink *const outlink = inlink->dst->outputs[0];
 +    AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
 +
 +    swr_convert(aconvert->swr, outsamplesref->data, n,
 +                        (void *)insamplesref->data, n);
 +
 +    avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
 +    outsamplesref->audio->channel_layout = outlink->channel_layout;
 +    outsamplesref->audio->planar         = outlink->planar;
 +
 +    ff_filter_samples(outlink, outsamplesref);
 +    avfilter_unref_buffer(insamplesref);
 +}
 +
 +AVFilter avfilter_af_aconvert = {
 +    .name          = "aconvert",
 +    .description   = NULL_IF_CONFIG_SMALL("Convert the input audio to sample_fmt:channel_layout:packed_fmt."),
 +    .priv_size     = sizeof(AConvertContext),
 +    .init          = init,
 +    .uninit        = uninit,
 +    .query_formats = query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .type            = AVMEDIA_TYPE_AUDIO,
 +                                    .filter_samples  = filter_samples,
 +                                    .min_perms       = AV_PERM_READ, },
 +                                  { .name = NULL}},
 +    .outputs   = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .type            = AVMEDIA_TYPE_AUDIO,
 +                                    .config_props    = config_output, },
 +                                  { .name = NULL}},
 +};
Simple merge
index faa64e1,0000000..fcc1089
mode 100644,000000..100644
--- /dev/null
@@@ -1,297 -1,0 +1,298 @@@
-         if (!ctx->inputs[i]->in_chlayouts ||
-             !ctx->inputs[i]->in_chlayouts->format_count) {
 +/*
 + * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * Audio merging filter
 + */
 +
 +#include "libswresample/swresample.h" // only for SWR_CH_MAX
 +#include "avfilter.h"
 +#include "audio.h"
 +#include "internal.h"
 +
 +#define QUEUE_SIZE 16
 +
 +typedef struct {
 +    int nb_in_ch[2];       /**< number of channels for each input */
 +    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
 +    int bps;
 +    struct amerge_queue {
 +        AVFilterBufferRef *buf[QUEUE_SIZE];
 +        int nb_buf, nb_samples, pos;
 +    } queue[2];
 +} AMergeContext;
 +
 +static av_cold void uninit(AVFilterContext *ctx)
 +{
 +    AMergeContext *am = ctx->priv;
 +    int i, j;
 +
 +    for (i = 0; i < 2; i++)
 +        for (j = 0; j < am->queue[i].nb_buf; j++)
 +            avfilter_unref_buffer(am->queue[i].buf[j]);
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    AMergeContext *am = ctx->priv;
 +    int64_t inlayout[2], outlayout;
 +    const int packing_fmts[] = { AVFILTER_PACKED, -1 };
 +    AVFilterFormats *formats;
++    AVFilterChannelLayouts *layouts;
 +    int i;
 +
 +    for (i = 0; i < 2; i++) {
-         inlayout[i] = ctx->inputs[i]->in_chlayouts->formats[0];
-         if (ctx->inputs[i]->in_chlayouts->format_count > 1) {
++        if (!ctx->inputs[i]->in_channel_layouts ||
++            !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "No channel layout for input %d\n", i + 1);
 +            return AVERROR(EINVAL);
 +        }
-         formats = NULL;
-         avfilter_add_format(&formats, inlayout[i]);
-         avfilter_formats_ref(formats, &ctx->inputs[i]->out_chlayouts);
++        inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
++        if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
 +            char buf[256];
 +            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
 +            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
 +        }
 +        am->nb_in_ch[i] = av_get_channel_layout_nb_channels(inlayout[i]);
 +    }
 +    if (am->nb_in_ch[0] + am->nb_in_ch[1] > SWR_CH_MAX) {
 +        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
 +        return AVERROR(EINVAL);
 +    }
 +    if (inlayout[0] & inlayout[1]) {
 +        av_log(ctx, AV_LOG_WARNING,
 +               "Inputs overlap: output layout will be meaningless\n");
 +        for (i = 0; i < am->nb_in_ch[0] + am->nb_in_ch[1]; i++)
 +            am->route[i] = i;
 +        outlayout = av_get_default_channel_layout(am->nb_in_ch[0] +
 +                                                  am->nb_in_ch[1]);
 +        if (!outlayout)
 +            outlayout = ((int64_t)1 << (am->nb_in_ch[0] + am->nb_in_ch[1])) - 1;
 +    } else {
 +        int *route[2] = { am->route, am->route + am->nb_in_ch[0] };
 +        int c, out_ch_number = 0;
 +
 +        outlayout = inlayout[0] | inlayout[1];
 +        for (c = 0; c < 64; c++)
 +            for (i = 0; i < 2; i++)
 +                if ((inlayout[i] >> c) & 1)
 +                    *(route[i]++) = out_ch_number++;
 +    }
 +    formats = avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO);
 +    avfilter_set_common_sample_formats(ctx, formats);
 +    formats = avfilter_make_format_list(packing_fmts);
 +    avfilter_set_common_packing_formats(ctx, formats);
 +    for (i = 0; i < 2; i++) {
-     formats = NULL;
-     avfilter_add_format(&formats, outlayout);
-     avfilter_formats_ref(formats, &ctx->outputs[0]->in_chlayouts);
++        layouts = NULL;
++        ff_add_channel_layout(&layouts, inlayout[i]);
++        ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
 +    }
++    layouts = NULL;
++    ff_add_channel_layout(&layouts, outlayout);
++    ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
 +    return 0;
 +}
 +
 +static int config_output(AVFilterLink *outlink)
 +{
 +    AVFilterContext *ctx = outlink->src;
 +    AMergeContext *am = ctx->priv;
 +    int64_t layout;
 +    char name[3][256];
 +    int i;
 +
 +    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
 +        av_log(ctx, AV_LOG_ERROR,
 +               "Inputs must have the same sample rate "
 +               "(%"PRIi64" vs %"PRIi64")\n",
 +               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
 +        return AVERROR(EINVAL);
 +    }
 +    am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
 +    outlink->sample_rate = ctx->inputs[0]->sample_rate;
 +    outlink->time_base   = ctx->inputs[0]->time_base;
 +    for (i = 0; i < 3; i++) {
 +        layout = (i < 2 ? ctx->inputs[i] : ctx->outputs[0])->channel_layout;
 +        av_get_channel_layout_string(name[i], sizeof(name[i]), -1, layout);
 +    }
 +    av_log(ctx, AV_LOG_INFO,
 +           "in1:%s + in2:%s -> out:%s\n", name[0], name[1], name[2]);
 +    return 0;
 +}
 +
 +static int request_frame(AVFilterLink *outlink)
 +{
 +    AVFilterContext *ctx = outlink->src;
 +    AMergeContext *am = ctx->priv;
 +    int i, ret;
 +
 +    for (i = 0; i < 2; i++)
 +        if (!am->queue[i].nb_samples)
 +            if ((ret = avfilter_request_frame(ctx->inputs[i])) < 0)
 +                return ret;
 +    return 0;
 +}
 +
 +/**
 + * Copy samples from two input streams to one output stream.
 + * @param nb_in_ch  number of channels in each input stream
 + * @param route     routing values;
 + *                  input channel i goes to output channel route[i];
 + *                  i <  nb_in_ch[0] are the channels from the first input;
 + *                  i >= nb_in_ch[0] are the channels from the second input
 + * @param ins       pointer to the samples of each inputs, in packed format;
 + *                  will be left at the end of the copied samples
 + * @param outs      pointer to the samples of the output, in packed format;
 + *                  must point to a buffer big enough;
 + *                  will be left at the end of the copied samples
 + * @param ns        number of samples to copy
 + * @param bps       bytes per sample
 + */
 +static inline void copy_samples(int nb_in_ch[2], int *route, uint8_t *ins[2],
 +                                uint8_t **outs, int ns, int bps)
 +{
 +    int *route_cur;
 +    int i, c;
 +
 +    while (ns--) {
 +        route_cur = route;
 +        for (i = 0; i < 2; i++) {
 +            for (c = 0; c < nb_in_ch[i]; c++) {
 +                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
 +                ins[i] += bps;
 +            }
 +        }
 +        *outs += (nb_in_ch[0] + nb_in_ch[1]) * bps;
 +    }
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 +{
 +    AVFilterContext *ctx = inlink->dst;
 +    AMergeContext *am = ctx->priv;
 +    AVFilterLink *const outlink = ctx->outputs[0];
 +    int input_number = inlink == ctx->inputs[1];
 +    struct amerge_queue *inq = &am->queue[input_number];
 +    int nb_samples, ns, i;
 +    AVFilterBufferRef *outbuf, **inbuf[2];
 +    uint8_t *ins[2], *outs;
 +
 +    if (inq->nb_buf == QUEUE_SIZE) {
 +        av_log(ctx, AV_LOG_ERROR, "Packet queue overflow; dropped\n");
 +        avfilter_unref_buffer(insamples);
 +        return;
 +    }
 +    inq->buf[inq->nb_buf++] = avfilter_ref_buffer(insamples, AV_PERM_READ |
 +                                                             AV_PERM_PRESERVE);
 +    inq->nb_samples += insamples->audio->nb_samples;
 +    avfilter_unref_buffer(insamples);
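 +    /* only start merging once the other input has queued samples too */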
 +    if (!am->queue[!input_number].nb_samples)
 +        return;
 +
 +    nb_samples = FFMIN(am->queue[0].nb_samples,
 +                       am->queue[1].nb_samples);
 +    outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE,
 +                                       nb_samples);
 +    outs = outbuf->data[0];
 +    for (i = 0; i < 2; i++) {
 +        inbuf[i] = am->queue[i].buf;
 +        ins[i] = (*inbuf[i])->data[0] +
 +                 am->queue[i].pos * am->nb_in_ch[i] * am->bps;
 +    }
 +
 +    avfilter_copy_buffer_ref_props(outbuf, *inbuf[0]);
 +    outbuf->audio->nb_samples     = nb_samples;
 +    outbuf->audio->channel_layout = outlink->channel_layout;
 +    outbuf->audio->planar         = outlink->planar;
 +
 +    while (nb_samples) {
 +        ns = nb_samples;
 +        for (i = 0; i < 2; i++)
 +            ns = FFMIN(ns, (*inbuf[i])->audio->nb_samples - am->queue[i].pos);
 +        /* Unroll the most common sample formats: speed +~350% for the loop,
 +           +~13% overall (including two common decoders) */
 +        switch (am->bps) {
 +            case 1:
 +                copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, 1);
 +                break;
 +            case 2:
 +                copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, 2);
 +                break;
 +            case 4:
 +                copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, 4);
 +                break;
 +            default:
 +                copy_samples(am->nb_in_ch, am->route, ins, &outs, ns, am->bps);
 +                break;
 +        }
 +
 +        nb_samples -= ns;
 +        for (i = 0; i < 2; i++) {
 +            am->queue[i].nb_samples -= ns;
 +            am->queue[i].pos += ns;
 +            if (am->queue[i].pos == (*inbuf[i])->audio->nb_samples) {
 +                am->queue[i].pos = 0;
 +                avfilter_unref_buffer(*inbuf[i]);
 +                *inbuf[i] = NULL;
 +                inbuf[i]++;
 +                ins[i] = *inbuf[i] ? (*inbuf[i])->data[0] : NULL;
 +            }
 +        }
 +    }
 +    for (i = 0; i < 2; i++) {
 +        int nbufused = inbuf[i] - am->queue[i].buf;
 +        if (nbufused) {
 +            am->queue[i].nb_buf -= nbufused;
 +            memmove(am->queue[i].buf, inbuf[i],
 +                    am->queue[i].nb_buf * sizeof(**inbuf));
 +        }
 +    }
 +    ff_filter_samples(ctx->outputs[0], outbuf);
 +}
 +
 +AVFilter avfilter_af_amerge = {
 +    .name          = "amerge",
 +    .description   = NULL_IF_CONFIG_SMALL("Merge two audio streams into "
 +                                          "a single multi-channel stream."),
 +    .priv_size     = sizeof(AMergeContext),
 +    .uninit        = uninit,
 +    .query_formats = query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {
 +        { .name             = "in1",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .filter_samples   = filter_samples,
 +          .min_perms        = AV_PERM_READ, },
 +        { .name             = "in2",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .filter_samples   = filter_samples,
 +          .min_perms        = AV_PERM_READ, },
 +        { .name = NULL }
 +    },
 +    .outputs   = (const AVFilterPad[]) {
 +        { .name             = "default",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .config_props     = config_output,
 +          .request_frame    = request_frame, },
 +        { .name = NULL }
 +    },
 +};
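For context, a quick way to exercise the new amerge filter is a lavfi graph along these lines (an illustrative sketch; the source options are assumed and not part of this diff):

    amovie=left.wav [l] ; amovie=right.wav [r] ; [l] [r] amerge

which merges the two inputs into the single multi-channel stream described by the filter above.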
index a2980d9,0000000..521ccf7
mode 100644,000000..100644
--- /dev/null
@@@ -1,127 -1,0 +1,164 @@@
 +/*
 + * Copyright (c) 2011 Stefano Sabatini
 + * Copyright (c) 2011 Mina Nagy Zaki
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * resampling audio filter
 + */
 +
 +#include "libswresample/swresample.h"
 +#include "avfilter.h"
 +#include "audio.h"
 +#include "internal.h"
 +
 +typedef struct {
 +    int out_rate;
 +    double ratio;
 +    struct SwrContext *swr;
 +} AResampleContext;
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    AResampleContext *aresample = ctx->priv;
 +    int ret;
 +
 +    if (args) {
 +        if ((ret = ff_parse_sample_rate(&aresample->out_rate, args, ctx)) < 0)
 +            return ret;
 +    } else {
 +        aresample->out_rate = -1;
 +    }
 +
 +    return 0;
 +}
 +
 +static av_cold void uninit(AVFilterContext *ctx)
 +{
 +    AResampleContext *aresample = ctx->priv;
 +    swr_free(&aresample->swr);
 +}
 +
++static int query_formats(AVFilterContext *ctx)
++{
++    AResampleContext *aresample = ctx->priv;
++
++    AVFilterLink *inlink  = ctx->inputs[0];
++    AVFilterLink *outlink = ctx->outputs[0];
++
++    AVFilterFormats        *in_formats      = avfilter_all_formats(AVMEDIA_TYPE_AUDIO);
++    AVFilterFormats        *out_formats     = avfilter_all_formats(AVMEDIA_TYPE_AUDIO);
++    AVFilterFormats        *in_samplerates  = ff_all_samplerates();
++    AVFilterFormats        *out_samplerates;
++
++
++    AVFilterChannelLayouts *in_layouts      = ff_all_channel_layouts();
++    AVFilterChannelLayouts *out_layouts     = ff_all_channel_layouts();
++
++    avfilter_formats_ref(in_formats,  &inlink->out_formats);
++    avfilter_formats_ref(out_formats, &outlink->in_formats);
++
++    avfilter_formats_ref(in_samplerates,  &inlink->out_samplerates);
++
++    ff_channel_layouts_ref(in_layouts,  &inlink->out_channel_layouts);
++    ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
++
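++    /* restrict the output to the requested rate when one was given,
++     * otherwise pass every sample rate through unchanged */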
++    if(aresample->out_rate > 0) {
++        int sample_rates[] = { aresample->out_rate, -1 };
++        ff_set_common_samplerates(ctx, avfilter_make_format_list(sample_rates));
++    } else {
++        out_samplerates = ff_all_samplerates();
++        avfilter_formats_ref(out_samplerates, &outlink->in_samplerates);
++    }
++
++    return 0;
++}
++
++
 +static int config_output(AVFilterLink *outlink)
 +{
 +    int ret;
 +    AVFilterContext *ctx = outlink->src;
 +    AVFilterLink *inlink = ctx->inputs[0];
 +    AResampleContext *aresample = ctx->priv;
 +
 +    if (aresample->out_rate == -1)
 +        aresample->out_rate = outlink->sample_rate;
 +    else
 +        outlink->sample_rate = aresample->out_rate;
 +    outlink->time_base = (AVRational) {1, aresample->out_rate};
 +
 +    //TODO: make the resampling parameters (filter size, phase shift, linear, cutoff) configurable
 +    aresample->swr = swr_alloc_set_opts(aresample->swr,
 +                                        inlink->channel_layout, inlink->format, aresample->out_rate,
 +                                        inlink->channel_layout, inlink->format, inlink->sample_rate,
 +                                        0, ctx);
 +    if (!aresample->swr)
 +        return AVERROR(ENOMEM);
 +    ret = swr_init(aresample->swr);
 +    if (ret < 0)
 +        return ret;
 +
 +    aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate;
 +
 +    av_log(ctx, AV_LOG_INFO, "r:%"PRId64"Hz -> r:%"PRId64"Hz\n",
 +           inlink->sample_rate, outlink->sample_rate);
 +    return 0;
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
 +{
 +    AResampleContext *aresample = inlink->dst->priv;
 +    const int n_in  = insamplesref->audio->nb_samples;
 +    int n_out       = n_in * aresample->ratio;
 +    AVFilterLink *const outlink = inlink->dst->outputs[0];
 +    AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
 +
 +    n_out = swr_convert(aresample->swr, outsamplesref->data, n_out,
 +                                 (void *)insamplesref->data, n_in);
 +
 +    avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
 +    outsamplesref->audio->sample_rate = outlink->sample_rate;
 +    outsamplesref->audio->nb_samples  = n_out;
 +    outsamplesref->pts = insamplesref->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
 +        av_rescale(outlink->sample_rate, insamplesref->pts, inlink ->sample_rate);
 +
 +    ff_filter_samples(outlink, outsamplesref);
 +    avfilter_unref_buffer(insamplesref);
 +}
 +
 +AVFilter avfilter_af_aresample = {
 +    .name          = "aresample",
 +    .description   = NULL_IF_CONFIG_SMALL("Resample audio data."),
 +    .init          = init,
 +    .uninit        = uninit,
++    .query_formats = query_formats,
 +    .priv_size     = sizeof(AResampleContext),
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .type            = AVMEDIA_TYPE_AUDIO,
 +                                    .filter_samples  = filter_samples,
 +                                    .min_perms       = AV_PERM_READ, },
 +                                  { .name = NULL}},
 +    .outputs   = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .config_props    = config_output,
 +                                    .type            = AVMEDIA_TYPE_AUDIO, },
 +                                  { .name = NULL}},
 +};
index faa3b7c,0000000..9f04023
mode 100644,000000..100644
--- /dev/null
@@@ -1,209 -1,0 +1,210 @@@
-         formats = ctx->inputs[i]->in_chlayouts;
-         avfilter_formats_ref(formats, &ctx->inputs[i]->out_chlayouts);
-         avfilter_formats_ref(formats, &ctx->outputs[i]->in_chlayouts);
 +/*
 + * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * Stream (de)synchronization filter
 + */
 +
 +#include "libavutil/eval.h"
 +#include "avfilter.h"
 +#include "audio.h"
 +#include "internal.h"
 +
 +#define QUEUE_SIZE 16
 +
 +static const char * const var_names[] = {
 +    "b1", "b2",
 +    "s1", "s2",
 +    "t1", "t2",
 +    NULL
 +};
 +
 +enum var_name {
 +    VAR_B1, VAR_B2,
 +    VAR_S1, VAR_S2,
 +    VAR_T1, VAR_T2,
 +    VAR_NB
 +};
 +
 +typedef struct {
 +    AVExpr *expr;
 +    double var_values[VAR_NB];
 +    struct buf_queue {
 +        AVFilterBufferRef *buf[QUEUE_SIZE];
 +        unsigned tail, nb;
 +        /* buf[tail] is the oldest,
 +           buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
 +    } queue[2];
 +    int req[2];
 +    int next_out;
 +    int eof; /* bitmask, one bit for each stream */
 +} AStreamSyncContext;
 +
 +static const char *default_expr = "t1-t2";
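 +/* With the default expression "t1-t2", a non-negative result in send_next()
 + * selects output 2, i.e. the stream whose timestamp lags behind is sent next. */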
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
 +{
 +    AStreamSyncContext *as = ctx->priv;
 +    const char *expr = args0 ? args0 : default_expr;
 +    int r, i;
 +
 +    r = av_expr_parse(&as->expr, expr, var_names,
 +                      NULL, NULL, NULL, NULL, 0, ctx);
 +    if (r < 0) {
 +        av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", expr);
 +        return r;
 +    }
 +    for (i = 0; i < 42; i++)
 +        av_expr_eval(as->expr, as->var_values, NULL); /* exercise prng */
 +    return 0;
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    int i;
 +    AVFilterFormats *formats;
++    AVFilterChannelLayouts *layouts;
 +
 +    for (i = 0; i < 2; i++) {
 +        formats = ctx->inputs[i]->in_formats;
 +        avfilter_formats_ref(formats, &ctx->inputs[i]->out_formats);
 +        avfilter_formats_ref(formats, &ctx->outputs[i]->in_formats);
 +        formats = ctx->inputs[i]->in_packing;
 +        avfilter_formats_ref(formats, &ctx->inputs[i]->out_packing);
 +        avfilter_formats_ref(formats, &ctx->outputs[i]->in_packing);
++        layouts = ctx->inputs[i]->in_channel_layouts;
++        ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
++        ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts);
 +    }
 +    return 0;
 +}
 +
 +static int config_output(AVFilterLink *outlink)
 +{
 +    AVFilterContext *ctx = outlink->src;
 +    int id = outlink == ctx->outputs[1];
 +
 +    outlink->sample_rate = ctx->inputs[id]->sample_rate;
 +    outlink->time_base   = ctx->inputs[id]->time_base;
 +    return 0;
 +}
 +
 +static void send_out(AVFilterContext *ctx, int out_id)
 +{
 +    AStreamSyncContext *as = ctx->priv;
 +    struct buf_queue *queue = &as->queue[out_id];
 +    AVFilterBufferRef *buf = queue->buf[queue->tail];
 +
 +    queue->buf[queue->tail] = NULL;
 +    as->var_values[VAR_B1 + out_id]++;
 +    as->var_values[VAR_S1 + out_id] += buf->audio->nb_samples;
 +    if (buf->pts != AV_NOPTS_VALUE)
 +        as->var_values[VAR_T1 + out_id] =
 +            av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
 +    as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
 +                                   (double)ctx->inputs[out_id]->sample_rate;
 +    ff_filter_samples(ctx->outputs[out_id], buf);
 +    queue->nb--;
 +    queue->tail = (queue->tail + 1) % QUEUE_SIZE;
 +    if (as->req[out_id])
 +        as->req[out_id]--;
 +}
 +
 +static void send_next(AVFilterContext *ctx)
 +{
 +    AStreamSyncContext *as = ctx->priv;
 +    int i;
 +
 +    while (1) {
 +        if (!as->queue[as->next_out].nb)
 +            break;
 +        send_out(ctx, as->next_out);
 +        if (!as->eof)
 +            as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
 +    }
 +    for (i = 0; i < 2; i++)
 +        if (as->queue[i].nb == QUEUE_SIZE)
 +            send_out(ctx, i);
 +}
 +
 +static int request_frame(AVFilterLink *outlink)
 +{
 +    AVFilterContext *ctx = outlink->src;
 +    AStreamSyncContext *as = ctx->priv;
 +    int id = outlink == ctx->outputs[1];
 +
 +    as->req[id]++;
 +    while (as->req[id] && !(as->eof & (1 << id))) {
 +        if (as->queue[as->next_out].nb) {
 +            send_next(ctx);
 +        } else {
 +            as->eof |= 1 << as->next_out;
 +            avfilter_request_frame(ctx->inputs[as->next_out]);
 +            if (as->eof & (1 << as->next_out))
 +                as->next_out = !as->next_out;
 +        }
 +    }
 +    return 0;
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 +{
 +    AVFilterContext *ctx = inlink->dst;
 +    AStreamSyncContext *as = ctx->priv;
 +    int id = inlink == ctx->inputs[1];
 +
 +    as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] =
 +        insamples;
 +    as->eof &= ~(1 << id);
 +    send_next(ctx);
 +}
 +
 +AVFilter avfilter_af_astreamsync = {
 +    .name          = "astreamsync",
 +    .description   = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
 +                                          "in a configurable order."),
 +    .priv_size     = sizeof(AStreamSyncContext),
 +    .init          = init,
 +    .query_formats = query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {
 +        { .name             = "in1",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .filter_samples   = filter_samples,
 +          .min_perms        = AV_PERM_READ, },
 +        { .name             = "in2",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .filter_samples   = filter_samples,
 +          .min_perms        = AV_PERM_READ, },
 +        { .name = NULL }
 +    },
 +    .outputs   = (const AVFilterPad[]) {
 +        { .name             = "out1",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .config_props     = config_output,
 +          .request_frame    = request_frame, },
 +        { .name             = "out2",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .config_props     = config_output,
 +          .request_frame    = request_frame, },
 +        { .name = NULL }
 +    },
 +};
index 6afed72,0000000..2ab248f
mode 100644,000000..100644
--- /dev/null
@@@ -1,164 -1,0 +1,169 @@@
-     formats = NULL;
-     avfilter_add_format(&formats, AV_CH_LAYOUT_STEREO);
-     avfilter_set_common_channel_layouts(ctx, formats);
 +/*
 + * Copyright (c) 2011 Mina Nagy Zaki
 + * Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
 + * This source code is freely redistributable and may be used for any purpose.
 + * This copyright notice must be maintained.  Edward Beingessner And Sundry
 + * Contributors are not responsible for the consequences of using this
 + * software.
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * Stereo Widening Effect. Adds audio cues to move stereo image in
 + * front of the listener. Adapted from the libsox earwax effect.
 + */
 +
 +#include "libavutil/audioconvert.h"
 +#include "avfilter.h"
 +#include "audio.h"
++#include "formats.h"
 +
 +#define NUMTAPS 64
 +
 +static const int8_t filt[NUMTAPS] = {
 +/* 30°  330° */
 +    4,   -6,     /* 32 tap stereo FIR filter. */
 +    4,  -11,     /* One side filters as if the */
 +   -1,   -5,     /* signal was from 30 degrees */
 +    3,    3,     /* from the ear, the other as */
 +   -2,    5,     /* if 330 degrees. */
 +   -5,    0,
 +    9,    1,
 +    6,    3,     /*                         Input                         */
 +   -4,   -1,     /*                   Left         Right                  */
 +   -5,   -3,     /*                __________   __________                */
 +   -2,   -5,     /*               |          | |          |               */
 +   -7,    1,     /*           .---|  Hh,0(f) | |  Hh,0(f) |---.           */
 +    6,   -7,     /*          /    |__________| |__________|    \          */
 +   30,  -29,     /*         /                \ /                \         */
 +   12,   -3,     /*        /                  X                  \        */
 +  -11,    4,     /*       /                  / \                  \       */
 +   -3,    7,     /*  ____V_____   __________V   V__________   _____V____  */
 +  -20,   23,     /* |          | |          |   |          | |          | */
 +    2,    0,     /* | Hh,30(f) | | Hh,330(f)|   | Hh,330(f)| | Hh,30(f) | */
 +    1,   -6,     /* |__________| |__________|   |__________| |__________| */
 +  -14,   -5,     /*      \     ___      /           \      ___     /      */
 +   15,  -18,     /*       \   /   \    /    _____    \    /   \   /       */
 +    6,    7,     /*        `->| + |<--'    /     \    `-->| + |<-'        */
 +   15,  -10,     /*           \___/      _/       \_      \___/           */
 +  -14,   22,     /*               \     / \       / \     /               */
 +   -7,   -2,     /*                `--->| |       | |<---'                */
 +   -4,    9,     /*                     \_/       \_/                     */
 +    6,  -12,     /*                                                       */
 +    6,   -6,     /*                       Headphones                      */
 +    0,  -11,
 +    0,   -5,
 +    4,    0};
 +
 +typedef struct {
 +    int16_t taps[NUMTAPS * 2];
 +} EarwaxContext;
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
++    int sample_rates[] = { 44100, -1 };
++
 +    AVFilterFormats *formats = NULL;
++    AVFilterChannelLayouts *layout = NULL;
++
 +    avfilter_add_format(&formats, AV_SAMPLE_FMT_S16);
 +    avfilter_set_common_sample_formats(ctx, formats);
++    ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
++    ff_set_common_channel_layouts(ctx, layout);
 +    formats = NULL;
 +    avfilter_add_format(&formats, AVFILTER_PACKED);
 +    avfilter_set_common_packing_formats(ctx, formats);
++    ff_set_common_samplerates(ctx, avfilter_make_format_list(sample_rates));
 +
 +    return 0;
 +}
 +
 +static int config_input(AVFilterLink *inlink)
 +{
 +    if (inlink->sample_rate != 44100) {
 +        av_log(inlink->dst, AV_LOG_ERROR,
 +               "The earwax filter only works for 44.1kHz audio. Insert "
 +               "a resample filter before this\n");
 +        return AVERROR(EINVAL);
 +    }
 +    return 0;
 +}
 +
 +//FIXME: replace with DSPContext.scalarproduct_int16
 +static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out)
 +{
 +    int32_t sample;
 +    int16_t j;
 +
 +    while (in < endin) {
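 +        /* 32 is the rounding bias for the final >> 6 (i.e. divide by 64) */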
 +        sample = 32;
 +        for (j = 0; j < NUMTAPS; j++)
 +            sample += in[j] * filt[j];
 +        *out = sample >> 6;
 +        out++;
 +        in++;
 +    }
 +
 +    return out;
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 +{
 +    AVFilterLink *outlink = inlink->dst->outputs[0];
 +    int16_t *taps, *endin, *in, *out;
 +    AVFilterBufferRef *outsamples =
 +        ff_get_audio_buffer(inlink, AV_PERM_WRITE,
 +                                  insamples->audio->nb_samples);
 +    avfilter_copy_buffer_ref_props(outsamples, insamples);
 +
 +    taps  = ((EarwaxContext *)inlink->dst->priv)->taps;
 +    out   = (int16_t *)outsamples->data[0];
 +    in    = (int16_t *)insamples ->data[0];
 +
 +    // copy part of new input and process with saved input
 +    memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
 +    out   = scalarproduct(taps, taps + NUMTAPS, out);
 +
 +    // process current input
 +    endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
 +    out   = scalarproduct(in, endin, out);
 +
 +    // save part of input for next round
 +    memcpy(taps, endin, NUMTAPS * sizeof(*taps));
 +
 +    ff_filter_samples(outlink, outsamples);
 +    avfilter_unref_buffer(insamples);
 +}
 +
 +AVFilter avfilter_af_earwax = {
 +    .name           = "earwax",
 +    .description    = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
 +    .query_formats  = query_formats,
 +    .priv_size      = sizeof(EarwaxContext),
 +    .inputs  = (const AVFilterPad[])  {{  .name     = "default",
 +                                    .type           = AVMEDIA_TYPE_AUDIO,
 +                                    .filter_samples = filter_samples,
 +                                    .config_props   = config_input,
 +                                    .min_perms      = AV_PERM_READ, },
 +                                 {  .name = NULL}},
 +
 +    .outputs = (const AVFilterPad[])  {{  .name     = "default",
 +                                    .type           = AVMEDIA_TYPE_AUDIO, },
 +                                 {  .name = NULL}},
 +};
index fd65aac,0000000..3100a14
mode 100644,000000..100644
--- /dev/null
@@@ -1,383 -1,0 +1,384 @@@
-     AVFilterFormats *formats;
 +/*
 + * Copyright (c) 2002 Anders Johansson <ajh@atri.curtin.edu.au>
 + * Copyright (c) 2011 Clément Bœsch <ubitux@gmail.com>
 + * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 + * GNU General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * Audio panning filter (channels mixing)
 + * Original code written by Anders Johansson for MPlayer,
 + * reimplemented for FFmpeg.
 + */
 +
 +#include <stdio.h>
 +#include "libavutil/avstring.h"
 +#include "libavutil/opt.h"
 +#include "libswresample/swresample.h"
 +#include "audio.h"
 +#include "avfilter.h"
++#include "formats.h"
 +
 +#define MAX_CHANNELS 63
 +
 +typedef struct PanContext {
 +    int64_t out_channel_layout;
 +    double gain[MAX_CHANNELS][MAX_CHANNELS];
 +    int64_t need_renorm;
 +    int need_renumber;
 +    int nb_input_channels;
 +    int nb_output_channels;
 +
 +    int pure_gains;
 +    /* channel mapping specific */
 +    int channel_map[SWR_CH_MAX];
 +    struct SwrContext *swr;
 +} PanContext;
 +
 +static int parse_channel_name(char **arg, int *rchannel, int *rnamed)
 +{
 +    char buf[8];
 +    int len, i, channel_id = 0;
 +    int64_t layout, layout0;
 +
 +    /* try to parse a channel name, e.g. "FL" */
 +    if (sscanf(*arg, " %7[A-Z] %n", buf, &len)) {
 +        layout0 = layout = av_get_channel_layout(buf);
 +        /* channel_id <- first set bit in layout */
 +        for (i = 32; i > 0; i >>= 1) {
 +            if (layout >= (int64_t)1 << i) {
 +                channel_id += i;
 +                layout >>= i;
 +            }
 +        }
 +        /* reject layouts that are not a single channel */
 +        if (channel_id >= MAX_CHANNELS || layout0 != (int64_t)1 << channel_id)
 +            return AVERROR(EINVAL);
 +        *rchannel = channel_id;
 +        *rnamed = 1;
 +        *arg += len;
 +        return 0;
 +    }
 +    /* try to parse a channel number, e.g. "c2" */
 +    if (sscanf(*arg, " c%d %n", &channel_id, &len) &&
 +        channel_id >= 0 && channel_id < MAX_CHANNELS) {
 +        *rchannel = channel_id;
 +        *rnamed = 0;
 +        *arg += len;
 +        return 0;
 +    }
 +    return AVERROR(EINVAL);
 +}
 +
 +static void skip_spaces(char **arg)
 +{
 +    int len = 0;
 +
 +    sscanf(*arg, " %n", &len);
 +    *arg += len;
 +}
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args0, void *opaque)
 +{
 +    PanContext *const pan = ctx->priv;
 +    char *arg, *arg0, *tokenizer, *args = av_strdup(args0);
 +    int out_ch_id, in_ch_id, len, named;
 +    int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels
 +    double gain;
 +
 +    if (!args0) {
 +        av_log(ctx, AV_LOG_ERROR,
 +               "pan filter needs a channel layout and a set "
 +               "of channels definitions as parameter\n");
 +        return AVERROR(EINVAL);
 +    }
 +    if (!args)
 +        return AVERROR(ENOMEM);
 +    arg = av_strtok(args, ":", &tokenizer);
 +    pan->out_channel_layout = av_get_channel_layout(arg);
 +    if (!pan->out_channel_layout) {
 +        av_log(ctx, AV_LOG_ERROR, "Unknown channel layout \"%s\"\n", arg);
 +        return AVERROR(EINVAL);
 +    }
 +    pan->nb_output_channels = av_get_channel_layout_nb_channels(pan->out_channel_layout);
 +
 +    /* parse channel specifications */
 +    while ((arg = arg0 = av_strtok(NULL, ":", &tokenizer))) {
 +        /* channel name */
 +        if (parse_channel_name(&arg, &out_ch_id, &named)) {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "Expected out channel name, got \"%.8s\"\n", arg);
 +            return AVERROR(EINVAL);
 +        }
 +        if (named) {
 +            if (!((pan->out_channel_layout >> out_ch_id) & 1)) {
 +                av_log(ctx, AV_LOG_ERROR,
 +                       "Channel \"%.8s\" does not exist in the chosen layout\n", arg0);
 +                return AVERROR(EINVAL);
 +            }
 +            /* get the channel number in the output channel layout:
 +             * out_channel_layout & ((1 << out_ch_id) - 1) are all the
 +             * channels that come before out_ch_id,
 +             * so their count is the index of out_ch_id */
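 +            /* e.g. with a 2.1 output layout (FL|FR|LFE) and out_ch_id = 3
 +             * (LFE), the mask keeps FL|FR, whose 2 channels give LFE the
 +             * output index 2 */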
 +            out_ch_id = av_get_channel_layout_nb_channels(pan->out_channel_layout & (((int64_t)1 << out_ch_id) - 1));
 +        }
 +        if (out_ch_id < 0 || out_ch_id >= pan->nb_output_channels) {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "Invalid out channel name \"%.8s\"\n", arg0);
 +            return AVERROR(EINVAL);
 +        }
 +        if (*arg == '=') {
 +            arg++;
 +        } else if (*arg == '<') {
 +            pan->need_renorm |= (int64_t)1 << out_ch_id;
 +            arg++;
 +        } else {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "Syntax error after channel name in \"%.8s\"\n", arg0);
 +            return AVERROR(EINVAL);
 +        }
 +        /* gains */
 +        while (1) {
 +            gain = 1;
 +            if (sscanf(arg, " %lf %n* %n", &gain, &len, &len))
 +                arg += len;
 +            if (parse_channel_name(&arg, &in_ch_id, &named)){
 +                av_log(ctx, AV_LOG_ERROR,
 +                       "Expected in channel name, got \"%.8s\"\n", arg);
 +                return AVERROR(EINVAL);
 +            }
 +            nb_in_channels[named]++;
 +            if (nb_in_channels[!named]) {
 +                av_log(ctx, AV_LOG_ERROR,
 +                       "Can not mix named and numbered channels\n");
 +                return AVERROR(EINVAL);
 +            }
 +            pan->gain[out_ch_id][in_ch_id] = gain;
 +            if (!*arg)
 +                break;
 +            if (*arg != '+') {
 +                av_log(ctx, AV_LOG_ERROR, "Syntax error near \"%.8s\"\n", arg);
 +                return AVERROR(EINVAL);
 +            }
 +            arg++;
 +            skip_spaces(&arg);
 +        }
 +    }
 +    pan->need_renumber = !!nb_in_channels[1];
 +
 +    av_free(args);
 +    return 0;
 +}
 +
 +static int are_gains_pure(const PanContext *pan)
 +{
 +    int i, j;
 +
 +    for (i = 0; i < MAX_CHANNELS; i++) {
 +        int nb_gain = 0;
 +
 +        for (j = 0; j < MAX_CHANNELS; j++) {
 +            double gain = pan->gain[i][j];
 +
 +            /* channel mapping is effective only if 0% or 100% of a channel is
 +             * selected... */
 +            if (gain != 0. && gain != 1.)
 +                return 0;
 +            /* ...and if the output channel is only composed of one input */
 +            if (gain && nb_gain++)
 +                return 0;
 +        }
 +    }
 +    return 1;
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    PanContext *pan = ctx->priv;
 +    AVFilterLink *inlink  = ctx->inputs[0];
 +    AVFilterLink *outlink = ctx->outputs[0];
-     formats = avfilter_make_all_channel_layouts();
-     avfilter_formats_ref(formats, &inlink->out_chlayouts);
++    AVFilterChannelLayouts *layouts;
 +
 +    pan->pure_gains = are_gains_pure(pan);
 +    /* libswr supports any sample and packing formats */
 +    avfilter_set_common_sample_formats(ctx, avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO));
 +    avfilter_set_common_packing_formats(ctx, avfilter_make_all_packing_formats());
 +
 +    // inlink supports any channel layout
-     formats = NULL;
-     avfilter_add_format(&formats, pan->out_channel_layout);
-     avfilter_formats_ref(formats, &outlink->in_chlayouts);
++    layouts = ff_all_channel_layouts();
++    ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
 +
 +    // outlink supports only requested output channel layout
++    layouts = NULL;
++    ff_add_channel_layout(&layouts, pan->out_channel_layout);
++    ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
 +    return 0;
 +}
 +
 +static int config_props(AVFilterLink *link)
 +{
 +    AVFilterContext *ctx = link->dst;
 +    PanContext *pan = ctx->priv;
 +    char buf[1024], *cur;
 +    int i, j, k, r;
 +    double t;
 +
 +    pan->nb_input_channels = av_get_channel_layout_nb_channels(link->channel_layout);
 +    if (pan->need_renumber) {
 +        // input channels were given by their name: renumber them
 +        for (i = j = 0; i < MAX_CHANNELS; i++) {
 +            if ((link->channel_layout >> i) & 1) {
 +                for (k = 0; k < pan->nb_output_channels; k++)
 +                    pan->gain[k][j] = pan->gain[k][i];
 +                j++;
 +            }
 +        }
 +    }
 +
 +    // sanity check; can't be done in query_formats since the inlink
 +    // channel layout is unknown at that time
 +    if (pan->nb_input_channels > SWR_CH_MAX ||
 +        pan->nb_output_channels > SWR_CH_MAX) {
 +        av_log(ctx, AV_LOG_ERROR,
 +               "libswresample support a maximum of %d channels. "
 +               "Feel free to ask for a higher limit.\n", SWR_CH_MAX);
 +        return AVERROR_PATCHWELCOME;
 +    }
 +
 +    // init libswresample context
 +    pan->swr = swr_alloc_set_opts(pan->swr,
 +                                  pan->out_channel_layout, link->format, link->sample_rate,
 +                                  link->channel_layout,    link->format, link->sample_rate,
 +                                  0, ctx);
 +    if (!pan->swr)
 +        return AVERROR(ENOMEM);
 +
 +    // gains are pure, init the channel mapping
 +    if (pan->pure_gains) {
 +
 +        // get channel map from the pure gains
 +        for (i = 0; i < pan->nb_output_channels; i++) {
 +            int ch_id = -1;
 +            for (j = 0; j < pan->nb_input_channels; j++) {
 +                if (pan->gain[i][j]) {
 +                    ch_id = j;
 +                    break;
 +                }
 +            }
 +            pan->channel_map[i] = ch_id;
 +        }
 +
 +        av_opt_set_int(pan->swr, "icl", pan->out_channel_layout, 0);
 +        av_opt_set_int(pan->swr, "uch", pan->nb_output_channels, 0);
 +        swr_set_channel_mapping(pan->swr, pan->channel_map);
 +    } else {
 +        // renormalize
 +        for (i = 0; i < pan->nb_output_channels; i++) {
 +            if (!((pan->need_renorm >> i) & 1))
 +                continue;
 +            t = 0;
 +            for (j = 0; j < pan->nb_input_channels; j++)
 +                t += pan->gain[i][j];
 +            if (t > -1E-5 && t < 1E-5) {
 +                // t is almost 0 but not exactly, this is probably a mistake
 +                if (t)
 +                    av_log(ctx, AV_LOG_WARNING,
 +                           "Degenerate coefficients while renormalizing\n");
 +                continue;
 +            }
 +            for (j = 0; j < pan->nb_input_channels; j++)
 +                pan->gain[i][j] /= t;
 +        }
 +        av_opt_set_int(pan->swr, "icl", link->channel_layout, 0);
 +        av_opt_set_int(pan->swr, "ocl", pan->out_channel_layout, 0);
 +        swr_set_matrix(pan->swr, pan->gain[0], pan->gain[1] - pan->gain[0]);
 +    }
 +
 +    r = swr_init(pan->swr);
 +    if (r < 0)
 +        return r;
 +
 +    // summary
 +    for (i = 0; i < pan->nb_output_channels; i++) {
 +        cur = buf;
 +        for (j = 0; j < pan->nb_input_channels; j++) {
 +            r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d",
 +                         j ? " + " : "", pan->gain[i][j], j);
 +            cur += FFMIN(buf + sizeof(buf) - cur, r);
 +        }
 +        av_log(ctx, AV_LOG_INFO, "o%d = %s\n", i, buf);
 +    }
 +    // add channel mapping summary if possible
 +    if (pan->pure_gains) {
 +        av_log(ctx, AV_LOG_INFO, "Pure channel mapping detected:");
 +        for (i = 0; i < pan->nb_output_channels; i++)
 +            if (pan->channel_map[i] < 0)
 +                av_log(ctx, AV_LOG_INFO, " M");
 +            else
 +                av_log(ctx, AV_LOG_INFO, " %d", pan->channel_map[i]);
 +        av_log(ctx, AV_LOG_INFO, "\n");
 +        return 0;
 +    }
 +    return 0;
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 +{
 +    int n = insamples->audio->nb_samples;
 +    AVFilterLink *const outlink = inlink->dst->outputs[0];
 +    AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
 +    PanContext *pan = inlink->dst->priv;
 +
 +    swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n);
 +    avfilter_copy_buffer_ref_props(outsamples, insamples);
 +    outsamples->audio->channel_layout = outlink->channel_layout;
 +    outsamples->audio->planar         = outlink->planar;
 +
 +    ff_filter_samples(outlink, outsamples);
 +    avfilter_unref_buffer(insamples);
 +}
 +
 +static av_cold void uninit(AVFilterContext *ctx)
 +{
 +    PanContext *pan = ctx->priv;
 +    swr_free(&pan->swr);
 +}
 +
 +AVFilter avfilter_af_pan = {
 +    .name          = "pan",
 +    .description   = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."),
 +    .priv_size     = sizeof(PanContext),
 +    .init          = init,
 +    .uninit        = uninit,
 +    .query_formats = query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {
 +        { .name             = "default",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .config_props     = config_props,
 +          .filter_samples   = filter_samples,
 +          .min_perms        = AV_PERM_READ, },
 +        { .name = NULL}
 +    },
 +    .outputs   = (const AVFilterPad[]) {
 +        { .name             = "default",
 +          .type             = AVMEDIA_TYPE_AUDIO, },
 +        { .name = NULL}
 +    },
 +};
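As an illustration of the syntax parsed by init() above (hypothetical argument strings, not from this diff): "pan=stereo:FL=FL:FR=0.5*FR+0.5*FC" requests a stereo output whose right channel mixes FR and FC, while "pan=mono:c0=0.9*c0+0.1*c1" downmixes two numbered input channels to mono.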
index 093ca2b,0000000..4f5d8e0
mode 100644,000000..100644
--- /dev/null
@@@ -1,176 -1,0 +1,183 @@@
-     formats = avfilter_make_all_channel_layouts();
-     if (!formats)
 +/*
 + * Copyright (c) 2012 Clément Bœsch <ubitux@gmail.com>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * Audio silence detector
 + */
 +
 +#include "libavutil/opt.h"
 +#include "libavutil/timestamp.h"
 +#include "audio.h"
++#include "formats.h"
 +#include "avfilter.h"
 +
 +typedef struct {
 +    const AVClass *class;
 +    char *noise_str;            ///< noise option string
 +    double noise;               ///< noise amplitude ratio
 +    int duration;               ///< minimum duration of silence until notification
 +    int64_t nb_null_samples;    ///< current number of continuous zero samples
 +    int64_t start;              ///< if silence is detected, this value contains the time of the first zero sample
 +    int last_sample_rate;       ///< last sample rate to check for sample rate changes
 +} SilenceDetectContext;
 +
 +#define OFFSET(x) offsetof(SilenceDetectContext, x)
 +static const AVOption silencedetect_options[] = {
 +    { "n",         "set noise tolerance",              OFFSET(noise_str), AV_OPT_TYPE_STRING, {.str="-60dB"}, CHAR_MIN, CHAR_MAX },
 +    { "noise",     "set noise tolerance",              OFFSET(noise_str), AV_OPT_TYPE_STRING, {.str="-60dB"}, CHAR_MIN, CHAR_MAX },
 +    { "d",         "set minimum duration in seconds",  OFFSET(duration),  AV_OPT_TYPE_INT,    {.dbl=2},    0, INT_MAX},
 +    { "duration",  "set minimum duration in seconds",  OFFSET(duration),  AV_OPT_TYPE_INT,    {.dbl=2},    0, INT_MAX},
 +    { NULL },
 +};
 +
 +static const char *silencedetect_get_name(void *ctx)
 +{
 +    return "silencedetect";
 +}
 +
 +static const AVClass silencedetect_class = {
 +    .class_name = "SilenceDetectContext",
 +    .item_name  = silencedetect_get_name,
 +    .option     = silencedetect_options,
 +};
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    int ret;
 +    char *tail;
 +    SilenceDetectContext *silence = ctx->priv;
 +
 +    silence->class = &silencedetect_class;
 +    av_opt_set_defaults(silence);
 +
 +    if ((ret = av_set_options_string(silence, args, "=", ":")) < 0) {
 +        av_log(ctx, AV_LOG_ERROR, "Error parsing options string: '%s'\n", args);
 +        return ret;
 +    }
 +
 +    silence->noise = strtod(silence->noise_str, &tail);
 +    if (!strcmp(tail, "dB")) {
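 +        /* e.g. the default "-60dB" becomes an amplitude ratio of
 +         * 10^(-60/20) = 0.001 */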
 +        silence->noise = pow(10, silence->noise/20);
 +    } else if (*tail) {
 +        av_log(ctx, AV_LOG_ERROR, "Invalid value '%s' for noise parameter.\n",
 +               silence->noise_str);
 +        return AVERROR(EINVAL);
 +    }
 +
 +    return 0;
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 +{
 +    int i;
 +    SilenceDetectContext *silence = inlink->dst->priv;
 +    const int nb_channels           = av_get_channel_layout_nb_channels(inlink->channel_layout);
 +    const int srate                 = inlink->sample_rate;
 +    const int nb_samples            = insamples->audio->nb_samples * nb_channels;
 +    const int64_t nb_samples_notify = srate * silence->duration    * nb_channels;
 +
 +    // scale number of null samples to the new sample rate
 +    if (silence->last_sample_rate && silence->last_sample_rate != srate)
 +        silence->nb_null_samples =
 +            srate * silence->nb_null_samples / silence->last_sample_rate;
 +    silence->last_sample_rate = srate;
 +
 +    // TODO: support more sample formats
 +    if (insamples->format == AV_SAMPLE_FMT_DBL) {
 +        double *p = (double *)insamples->data[0];
 +
 +        for (i = 0; i < nb_samples; i++, p++) {
 +            if (*p < silence->noise && *p > -silence->noise) {
 +                if (!silence->start) {
 +                    silence->nb_null_samples++;
 +                    if (silence->nb_null_samples >= nb_samples_notify) {
 +                        silence->start = insamples->pts - silence->duration / av_q2d(inlink->time_base);
 +                        av_log(silence, AV_LOG_INFO,
 +                               "silence_start: %s\n", av_ts2timestr(silence->start, &inlink->time_base));
 +                    }
 +                }
 +            } else {
 +                if (silence->start)
 +                    av_log(silence, AV_LOG_INFO,
 +                           "silence_end: %s | silence_duration: %s\n",
 +                           av_ts2timestr(insamples->pts,                  &inlink->time_base),
 +                           av_ts2timestr(insamples->pts - silence->start, &inlink->time_base));
 +                silence->nb_null_samples = silence->start = 0;
 +            }
 +        }
 +    }
 +
 +    ff_filter_samples(inlink->dst->outputs[0], insamples);
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    AVFilterFormats *formats = NULL;
++    AVFilterChannelLayouts *layouts = NULL;
 +    enum AVSampleFormat sample_fmts[] = {
 +        AV_SAMPLE_FMT_DBL,
 +        AV_SAMPLE_FMT_NONE
 +    };
 +    int packing_fmts[] = { AVFILTER_PACKED, -1 };
 +
-     avfilter_set_common_channel_layouts(ctx, formats);
++    layouts = ff_all_channel_layouts();
++    if (!layouts)
 +        return AVERROR(ENOMEM);
++    ff_set_common_channel_layouts(ctx, layouts);
 +
 +    formats = avfilter_make_format_list(sample_fmts);
 +    if (!formats)
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_sample_formats(ctx, formats);
 +
 +    formats = avfilter_make_format_list(packing_fmts);
 +    if (!formats)
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_packing_formats(ctx, formats);
 +
++    formats = ff_all_samplerates();
++    if (!formats)
++        return AVERROR(ENOMEM);
++    ff_set_common_samplerates(ctx, formats);
++
 +    return 0;
 +}
 +
 +AVFilter avfilter_af_silencedetect = {
 +    .name          = "silencedetect",
 +    .description   = NULL_IF_CONFIG_SMALL("Detect silence."),
 +    .priv_size     = sizeof(SilenceDetectContext),
 +    .init          = init,
 +    .query_formats = query_formats,
 +
 +    .inputs = (const AVFilterPad[]) {
 +        { .name             = "default",
 +          .type             = AVMEDIA_TYPE_AUDIO,
 +          .get_audio_buffer = ff_null_get_audio_buffer,
 +          .filter_samples   = filter_samples, },
 +        { .name = NULL }
 +    },
 +    .outputs = (const AVFilterPad[]) {
 +        { .name = "default",
 +          .type = AVMEDIA_TYPE_AUDIO, },
 +        { .name = NULL }
 +    },
 +};
index 8e2e37e,0000000..50f3cbd
mode 100644,000000..100644
--- /dev/null
@@@ -1,190 -1,0 +1,197 @@@
-     formats = avfilter_make_all_channel_layouts();
-     if (!formats)
 +/*
 + * Copyright (c) 2011 Stefano Sabatini
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * audio volume filter
 + * based on ffmpeg.c code
 + */
 +
 +#include "libavutil/audioconvert.h"
 +#include "libavutil/eval.h"
 +#include "audio.h"
 +#include "avfilter.h"
++#include "formats.h"
 +
 +typedef struct {
 +    double volume;
 +    int    volume_i;
 +} VolumeContext;
 +
 +static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    VolumeContext *vol = ctx->priv;
 +    char *tail;
 +    int ret = 0;
 +
 +    vol->volume = 1.0;
 +
 +    if (args) {
 +        /* parse the number as a decimal number */
 +        double d = strtod(args, &tail);
 +
 +        if (*tail) {
 +            if (!strcmp(tail, "dB")) {
 +                /* consider the argument an adjustment in decibels */
 +                d = pow(10, d/20);
 +            } else {
 +                /* parse the argument as an expression */
 +                ret = av_expr_parse_and_eval(&d, args, NULL, NULL,
 +                                             NULL, NULL, NULL, NULL,
 +                                             NULL, 0, ctx);
 +            }
 +        }
 +
 +        if (ret < 0) {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "Invalid volume argument '%s'\n", args);
 +            return AVERROR(EINVAL);
 +        }
 +
 +        if (d < 0 || d > 65536) { /* 65536 = INT_MIN / (128 * 256) */
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "Negative or too big volume value %f\n", d);
 +            return AVERROR(EINVAL);
 +        }
 +
 +        vol->volume = d;
 +    }
 +
 +    vol->volume_i = (int)(vol->volume * 256 + 0.5);
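 +    /* volume_i is the gain in 8-bit fixed point: e.g. volume = 0.5 gives
 +     * volume_i = 128, so an S16 sample of 1000 becomes
 +     * ((int64_t)1000 * 128 + 128) >> 8 = 500 in filter_samples() below */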
 +    av_log(ctx, AV_LOG_INFO, "volume=%f\n", vol->volume);
 +    return 0;
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    AVFilterFormats *formats = NULL;
++    AVFilterChannelLayouts *layouts;
 +    enum AVSampleFormat sample_fmts[] = {
 +        AV_SAMPLE_FMT_U8,
 +        AV_SAMPLE_FMT_S16,
 +        AV_SAMPLE_FMT_S32,
 +        AV_SAMPLE_FMT_FLT,
 +        AV_SAMPLE_FMT_DBL,
 +        AV_SAMPLE_FMT_NONE
 +    };
 +    int packing_fmts[] = { AVFILTER_PACKED, -1 };
 +
-     avfilter_set_common_channel_layouts(ctx, formats);
++    layouts = ff_all_channel_layouts();
++    if (!layouts)
 +        return AVERROR(ENOMEM);
++    ff_set_common_channel_layouts(ctx, layouts);
 +
 +    formats = avfilter_make_format_list(sample_fmts);
 +    if (!formats)
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_sample_formats(ctx, formats);
 +
 +    formats = avfilter_make_format_list(packing_fmts);
 +    if (!formats)
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_packing_formats(ctx, formats);
 +
++    formats = ff_all_samplerates();
++    if (!formats)
++        return AVERROR(ENOMEM);
++    ff_set_common_samplerates(ctx, formats);
++
 +    return 0;
 +}
 +
 +static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *insamples)
 +{
 +    VolumeContext *vol = inlink->dst->priv;
 +    AVFilterLink *outlink = inlink->dst->outputs[0];
 +    const int nb_samples = insamples->audio->nb_samples *
 +        av_get_channel_layout_nb_channels(insamples->audio->channel_layout);
 +    const double volume   = vol->volume;
 +    const int    volume_i = vol->volume_i;
 +    int i;
 +
 +    if (volume_i != 256) {
 +        switch (insamples->format) {
 +        case AV_SAMPLE_FMT_U8:
 +        {
 +            uint8_t *p = (void *)insamples->data[0];
 +            for (i = 0; i < nb_samples; i++) {
 +                int v = (((*p - 128) * volume_i + 128) >> 8) + 128;
 +                *p++ = av_clip_uint8(v);
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_S16:
 +        {
 +            int16_t *p = (void *)insamples->data[0];
 +            for (i = 0; i < nb_samples; i++) {
 +                int v = ((int64_t)*p * volume_i + 128) >> 8;
 +                *p++ = av_clip_int16(v);
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_S32:
 +        {
 +            int32_t *p = (void *)insamples->data[0];
 +            for (i = 0; i < nb_samples; i++) {
 +                int64_t v = (((int64_t)*p * volume_i + 128) >> 8);
 +                *p++ = av_clipl_int32(v);
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_FLT:
 +        {
 +            float *p = (void *)insamples->data[0];
 +            float scale = (float)volume;
 +            for (i = 0; i < nb_samples; i++) {
 +                *p++ *= scale;
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_DBL:
 +        {
 +            double *p = (void *)insamples->data[0];
 +            for (i = 0; i < nb_samples; i++) {
 +                *p *= volume;
 +                p++;
 +            }
 +            break;
 +        }
 +        }
 +    }
 +    ff_filter_samples(outlink, insamples);
 +}
 +
 +AVFilter avfilter_af_volume = {
 +    .name           = "volume",
 +    .description    = NULL_IF_CONFIG_SMALL("Change input volume."),
 +    .query_formats  = query_formats,
 +    .priv_size      = sizeof(VolumeContext),
 +    .init           = init,
 +
 +    .inputs  = (const AVFilterPad[])  {{ .name     = "default",
 +                                   .type           = AVMEDIA_TYPE_AUDIO,
 +                                   .filter_samples = filter_samples,
 +                                   .min_perms      = AV_PERM_READ|AV_PERM_WRITE},
 +                                 { .name = NULL}},
 +
 +    .outputs = (const AVFilterPad[])  {{ .name     = "default",
 +                                   .type           = AVMEDIA_TYPE_AUDIO, },
 +                                 { .name = NULL}},
 +};
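As parsed by init() above, the volume argument can be a plain factor, a dB adjustment, or an expression: for instance (hypothetical values) "volume=0.5" halves the amplitude, while "volume=-6dB" is converted through pow(10, -6/20) to roughly 0.5.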
@@@ -34,34 -34,17 +34,34 @@@ void avfilter_register_all(void
          return;
      initialized = 1;
  
 +    REGISTER_FILTER (ACONVERT,    aconvert,    af);
      REGISTER_FILTER (AFORMAT,     aformat,     af);
 +    REGISTER_FILTER (AMERGE,      amerge,      af);
      REGISTER_FILTER (ANULL,       anull,       af);
 +    REGISTER_FILTER (ARESAMPLE,   aresample,   af);
 +    REGISTER_FILTER (ASHOWINFO,   ashowinfo,   af);
 +    REGISTER_FILTER (ASPLIT,      asplit,      af);
 +    REGISTER_FILTER (ASTREAMSYNC, astreamsync, af);
+     REGISTER_FILTER (ASYNCTS,     asyncts,     af);
 +    REGISTER_FILTER (EARWAX,      earwax,      af);
 +    REGISTER_FILTER (PAN,         pan,         af);
 +    REGISTER_FILTER (SILENCEDETECT, silencedetect, af);
 +    REGISTER_FILTER (VOLUME,      volume,      af);
      REGISTER_FILTER (RESAMPLE,    resample,    af);
  
-     REGISTER_FILTER (ABUFFER,     abuffer,     asrc);
 +    REGISTER_FILTER (AEVALSRC,    aevalsrc,    asrc);
 +    REGISTER_FILTER (AMOVIE,      amovie,      asrc);
      REGISTER_FILTER (ANULLSRC,    anullsrc,    asrc);
  
 +    REGISTER_FILTER (ABUFFERSINK, abuffersink, asink);
      REGISTER_FILTER (ANULLSINK,   anullsink,   asink);
  
 +    REGISTER_FILTER (ASS,         ass,         vf);
 +    REGISTER_FILTER (BBOX,        bbox,        vf);
 +    REGISTER_FILTER (BLACKDETECT, blackdetect, vf);
      REGISTER_FILTER (BLACKFRAME,  blackframe,  vf);
      REGISTER_FILTER (BOXBLUR,     boxblur,     vf);
 +    REGISTER_FILTER (COLORMATRIX, colormatrix, vf);
      REGISTER_FILTER (COPY,        copy,        vf);
      REGISTER_FILTER (CROP,        crop,        vf);
      REGISTER_FILTER (CROPDETECT,  cropdetect,  vf);
index 4abcbc4,0000000..94197b2
mode 100644,000000..100644
--- /dev/null
@@@ -1,248 -1,0 +1,248 @@@
-     avfilter_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
 +/*
 + * Copyright (c) 2011 Stefano Sabatini
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * eval audio source
 + */
 +
 +#include "libavutil/audioconvert.h"
 +#include "libavutil/avassert.h"
 +#include "libavutil/avstring.h"
 +#include "libavutil/eval.h"
 +#include "libavutil/opt.h"
 +#include "libavutil/parseutils.h"
 +#include "avfilter.h"
 +#include "audio.h"
 +#include "internal.h"
 +
 +static const char * const var_names[] = {
 +    "n",            ///< number of frame
 +    "t",            ///< timestamp expressed in seconds
 +    "s",            ///< sample rate
 +    NULL
 +};
 +
 +enum var_name {
 +    VAR_N,
 +    VAR_T,
 +    VAR_S,
 +    VAR_VARS_NB
 +};
 +
 +typedef struct {
 +    const AVClass *class;
 +    char *sample_rate_str;
 +    int sample_rate;
 +    int64_t chlayout;
 +    int nb_channels;
 +    int64_t pts;
 +    AVExpr *expr[8];
 +    char *expr_str[8];
 +    int nb_samples;             ///< number of samples per requested frame
 +    char *duration_str;         ///< total duration of the generated audio
 +    double duration;
 +    uint64_t n;
 +    double var_values[VAR_VARS_NB];
 +} EvalContext;
 +
 +#define OFFSET(x) offsetof(EvalContext, x)
 +
 +static const AVOption eval_options[]= {
 +    { "nb_samples",  "set the number of samples per requested frame", OFFSET(nb_samples),      AV_OPT_TYPE_INT,    {.dbl = 1024},    0,        INT_MAX },
 +    { "n",           "set the number of samples per requested frame", OFFSET(nb_samples),      AV_OPT_TYPE_INT,    {.dbl = 1024},    0,        INT_MAX },
 +    { "sample_rate", "set the sample rate",                           OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX },
 +    { "s",           "set the sample rate",                           OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, CHAR_MIN, CHAR_MAX },
 +    { "duration",    "set audio duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0 },
 +    { "d",           "set audio duration", OFFSET(duration_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0 },
 +    { NULL },
 +};
 +
 +static const char *eval_get_name(void *ctx)
 +{
 +    return "aevalsrc";
 +}
 +
 +static const AVClass eval_class = {
 +    "AEvalSrcContext",
 +    eval_get_name,
 +    eval_options
 +};
 +
 +static int init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    EvalContext *eval = ctx->priv;
 +    char *args1 = av_strdup(args);
 +    char *expr, *buf, *bufptr;
 +    int ret, i;
 +
 +    eval->class = &eval_class;
 +    av_opt_set_defaults(eval);
 +
 +    /* parse expressions */
 +    buf = args1;
 +    i = 0;
 +    while ((expr = av_strtok(buf, ":", &bufptr))) {
 +        if (i >= 8) {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "More than 8 expressions provided, unsupported.\n");
 +            ret = AVERROR(EINVAL);
 +            return ret;
 +        }
 +        ret = av_expr_parse(&eval->expr[i], expr, var_names,
 +                            NULL, NULL, NULL, NULL, 0, ctx);
 +        if (ret < 0)
 +            goto end;
 +        i++;
 +        if (bufptr && *bufptr == ':') { /* found last expression */
 +            bufptr++;
 +            break;
 +        }
 +        buf = NULL;
 +    }
 +
 +    /* guess channel layout from nb expressions/channels */
 +    eval->nb_channels = i;
 +    eval->chlayout = av_get_default_channel_layout(eval->nb_channels);
 +    if (!eval->chlayout) {
 +        av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n",
 +               eval->nb_channels);
 +        ret = AVERROR(EINVAL);
 +        goto end;
 +    }
 +
 +    if (bufptr && (ret = av_set_options_string(eval, bufptr, "=", ":")) < 0)
 +        goto end;
 +
 +    if ((ret = ff_parse_sample_rate(&eval->sample_rate, eval->sample_rate_str, ctx)))
 +        goto end;
 +
 +    eval->duration = -1;
 +    if (eval->duration_str) {
 +        int64_t us = -1;
 +        if ((ret = av_parse_time(&us, eval->duration_str, 1)) < 0) {
 +            av_log(ctx, AV_LOG_ERROR, "Invalid duration: '%s'\n", eval->duration_str);
 +            goto end;
 +        }
 +        eval->duration = (double)us / 1000000;
 +    }
 +    eval->n = 0;
 +
 +end:
 +    av_free(args1);
 +    return ret;
 +}
 +
 +static void uninit(AVFilterContext *ctx)
 +{
 +    EvalContext *eval = ctx->priv;
 +    int i;
 +
 +    for (i = 0; i < 8; i++) {
 +        av_expr_free(eval->expr[i]);
 +        eval->expr[i] = NULL;
 +    }
 +    av_freep(&eval->duration_str);
 +    av_freep(&eval->sample_rate_str);
 +}
 +
 +static int config_props(AVFilterLink *outlink)
 +{
 +    EvalContext *eval = outlink->src->priv;
 +    char buf[128];
 +
 +    outlink->time_base = (AVRational){1, eval->sample_rate};
 +    outlink->sample_rate = eval->sample_rate;
 +
 +    eval->var_values[VAR_S] = eval->sample_rate;
 +
 +    av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout);
 +
 +    av_log(outlink->src, AV_LOG_INFO,
 +           "sample_rate:%d chlayout:%s duration:%f\n",
 +           eval->sample_rate, buf, eval->duration);
 +
 +    return 0;
 +}
 +
 +static int query_formats(AVFilterContext *ctx)
 +{
 +    EvalContext *eval = ctx->priv;
 +    enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE };
 +    int64_t chlayouts[] = { eval->chlayout, -1 };
 +    int packing_fmts[] = { AVFILTER_PLANAR, -1 };
 +
 +    avfilter_set_common_sample_formats (ctx, avfilter_make_format_list(sample_fmts));
++    ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
 +    avfilter_set_common_packing_formats(ctx, avfilter_make_format_list(packing_fmts));
 +
 +    return 0;
 +}
 +
 +static int request_frame(AVFilterLink *outlink)
 +{
 +    EvalContext *eval = outlink->src->priv;
 +    AVFilterBufferRef *samplesref;
 +    int i, j;
 +    double t = eval->var_values[VAR_N] * (double)1/eval->sample_rate;
 +
 +    if (eval->duration >= 0 && t > eval->duration)
 +        return AVERROR_EOF;
 +
 +    samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, eval->nb_samples);
 +
 +    /* evaluate expression for each single sample and for each channel */
 +    for (i = 0; i < eval->nb_samples; i++, eval->n++) {
 +        eval->var_values[VAR_N] = eval->n;
 +        eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate;
 +
 +        for (j = 0; j < eval->nb_channels; j++) {
 +            *((double *) samplesref->data[j] + i) =
 +                av_expr_eval(eval->expr[j], eval->var_values, NULL);
 +        }
 +    }
 +
 +    samplesref->pts = eval->pts;
 +    samplesref->pos = -1;
 +    samplesref->audio->sample_rate = eval->sample_rate;
 +    eval->pts += eval->nb_samples;
 +
 +    ff_filter_samples(outlink, samplesref);
 +
 +    return 0;
 +}
 +
 +AVFilter avfilter_asrc_aevalsrc = {
 +    .name        = "aevalsrc",
 +    .description = NULL_IF_CONFIG_SMALL("Generate an audio signal specified by an expression."),
 +
 +    .query_formats = query_formats,
 +    .init        = init,
 +    .uninit      = uninit,
 +    .priv_size   = sizeof(EvalContext),
 +
 +    .inputs      = (const AVFilterPad[]) {{ .name = NULL}},
 +
 +    .outputs     = (const AVFilterPad[]) {{ .name = "default",
 +                                      .type = AVMEDIA_TYPE_AUDIO,
 +                                      .config_props = config_props,
 +                                      .request_frame = request_frame, },
 +                                    { .name = NULL}},
 +};
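Editorial note: for readers not familiar with the expression API used by aevalsrc above, here is a minimal, self-contained sketch (an illustration, not part of the merge) of the same av_expr_parse()/av_expr_eval() pattern: one expression is parsed against the filter's var_names table and evaluated once per sample. The expression string, the 8000 Hz rate and the 16-sample loop are arbitrary example values.

    /* Hypothetical standalone illustration of the aevalsrc evaluation loop. */
    #include <stdio.h>
    #include "libavutil/eval.h"

    int main(void)
    {
        static const char * const var_names[] = { "n", "t", "s", NULL };
        double var_values[3] = { 0 };
        AVExpr *e = NULL;
        int i;

        /* "sin(2*PI*440*t)" is only an example expression */
        if (av_expr_parse(&e, "sin(2*PI*440*t)", var_names,
                          NULL, NULL, NULL, NULL, 0, NULL) < 0)
            return 1;

        for (i = 0; i < 16; i++) {
            var_values[0] = i;          /* n: sample index        */
            var_values[1] = i / 8000.0; /* t: timestamp (seconds) */
            var_values[2] = 8000;       /* s: sample rate         */
            printf("%f\n", av_expr_eval(e, var_values, NULL));
        }
        av_expr_free(e);
        return 0;
    }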
index d4c92f9,0000000..ce003ab
mode 100644,000000..100644
--- /dev/null
@@@ -1,168 -1,0 +1,97 @@@
- int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
- {
-     dst->pts    = src->pts;
-     dst->pos    = src->pkt_pos;
-     dst->format = src->format;
-     switch (dst->type) {
-     case AVMEDIA_TYPE_VIDEO:
-         dst->video->w                   = src->width;
-         dst->video->h                   = src->height;
-         dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
-         dst->video->interlaced          = src->interlaced_frame;
-         dst->video->top_field_first     = src->top_field_first;
-         dst->video->key_frame           = src->key_frame;
-         dst->video->pict_type           = src->pict_type;
-         break;
-     case AVMEDIA_TYPE_AUDIO:
-         dst->audio->sample_rate         = src->sample_rate;
-         dst->audio->channel_layout      = src->channel_layout;
-         break;
-     default:
-         return AVERROR(EINVAL);
-     }
-     return 0;
- }
- int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src)
- {
-     int planes, nb_channels;
-     memcpy(dst->data, src->data, sizeof(dst->data));
-     memcpy(dst->linesize, src->linesize, sizeof(dst->linesize));
-     dst->pts     = src->pts;
-     dst->format  = src->format;
-     switch (src->type) {
-     case AVMEDIA_TYPE_VIDEO:
-         dst->width               = src->video->w;
-         dst->height              = src->video->h;
-         dst->sample_aspect_ratio = src->video->sample_aspect_ratio;
-         dst->interlaced_frame    = src->video->interlaced;
-         dst->top_field_first     = src->video->top_field_first;
-         dst->key_frame           = src->video->key_frame;
-         dst->pict_type           = src->video->pict_type;
-         break;
-     case AVMEDIA_TYPE_AUDIO:
-         nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout);
-         planes      = av_sample_fmt_is_planar(src->format) ? nb_channels : 1;
-         if (planes > FF_ARRAY_ELEMS(dst->data)) {
-             dst->extended_data = av_mallocz(planes * sizeof(*dst->extended_data));
-             if (!dst->extended_data)
-                 return AVERROR(ENOMEM);
-             memcpy(dst->extended_data, src->extended_data,
-                    planes * sizeof(dst->extended_data));
-         } else
-             dst->extended_data = dst->data;
-         dst->sample_rate         = src->audio->sample_rate;
-         dst->channel_layout      = src->audio->channel_layout;
-         dst->nb_samples          = src->audio->nb_samples;
-         break;
-     default:
-         return AVERROR(EINVAL);
-     }
-     return 0;
- }
 +/*
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * libavcodec/libavfilter gluing utilities
 + */
 +
 +#include "avcodec.h"
 +#include "libavutil/opt.h"
 +
 +AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame,
 +                                                            int perms)
 +{
 +    AVFilterBufferRef *picref =
 +        avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize, perms,
 +                                                  frame->width, frame->height,
 +                                                  frame->format);
 +    if (!picref)
 +        return NULL;
 +    avfilter_copy_frame_props(picref, frame);
 +    return picref;
 +}
 +
 +AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
 +                                                            int perms)
 +{
 +    AVFilterBufferRef *picref =
 +        avfilter_get_audio_buffer_ref_from_arrays((uint8_t **)frame->data, frame->linesize[0], perms,
 +                                                  frame->nb_samples, frame->format,
 +                                                  av_frame_get_channel_layout(frame));
 +    if (!picref)
 +        return NULL;
 +    avfilter_copy_frame_props(picref, frame);
 +    return picref;
 +}
 +
 +int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame,
 +                                              const AVFilterBufferRef *samplesref)
 +{
 +    if (!samplesref || !samplesref->audio || !frame)
 +        return AVERROR(EINVAL);
 +
 +    memcpy(frame->data, samplesref->data, sizeof(frame->data));
 +    frame->pkt_pos    = samplesref->pos;
 +    frame->format     = samplesref->format;
 +    frame->nb_samples = samplesref->audio->nb_samples;
 +    frame->pts        = samplesref->pts;
 +
 +    return 0;
 +}
 +
 +int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame,
 +                                              const AVFilterBufferRef *picref)
 +{
 +    if (!picref || !picref->video || !frame)
 +        return AVERROR(EINVAL);
 +
 +    memcpy(frame->data,     picref->data,     sizeof(frame->data));
 +    memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));
 +    frame->pkt_pos          = picref->pos;
 +    frame->interlaced_frame = picref->video->interlaced;
 +    frame->top_field_first  = picref->video->top_field_first;
 +    frame->key_frame        = picref->video->key_frame;
 +    frame->pict_type        = picref->video->pict_type;
 +    frame->sample_aspect_ratio = picref->video->sample_aspect_ratio;
 +    frame->width            = picref->video->w;
 +    frame->height           = picref->video->h;
 +    frame->format           = picref->format;
 +    frame->pts              = picref->pts;
 +
 +    return 0;
 +}
 +
 +int avfilter_fill_frame_from_buffer_ref(AVFrame *frame,
 +                                        const AVFilterBufferRef *ref)
 +{
 +    if (!ref)
 +        return AVERROR(EINVAL);
 +    return ref->video ? avfilter_fill_frame_from_video_buffer_ref(frame, ref)
 +                      : avfilter_fill_frame_from_audio_buffer_ref(frame, ref);
 +}
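Editorial note: as a usage sketch (assumed calling code, not taken from the patch), the libavcodec/libavfilter glue above could be exercised roughly as follows: a decoded video AVFrame is wrapped into an AVFilterBufferRef, handed to a filter chain, and released again with avfilter_unref_buffer(). Everything except the two conversion calls is assumed to exist elsewhere.

    /* Sketch only: 'frame' is assumed to be a valid decoded video AVFrame. */
    #include "libavfilter/avcodec.h"

    static int feed_frame_to_lavfi(const AVFrame *frame)
    {
        AVFilterBufferRef *picref =
            avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_READ);

        if (!picref)
            return AVERROR(ENOMEM);

        /* ... push picref into a buffer source / filter chain here ... */

        avfilter_unref_buffer(picref);
        return 0;
    }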
  #include "libavutil/pixdesc.h"
  #include "libavutil/rational.h"
  #include "libavutil/audioconvert.h"
- #include "libavutil/imgutils.h"
 +#include "libavutil/avassert.h"
 +#include "libavutil/avstring.h"
  
  #include "avfilter.h"
+ #include "formats.h"
  #include "internal.h"
  
  unsigned avfilter_version(void) {
@@@ -44,145 -41,9 +44,18 @@@ const char *avfilter_configuration(void
  const char *avfilter_license(void)
  {
  #define LICENSE_PREFIX "libavfilter license: "
 -    return LICENSE_PREFIX LIBAV_LICENSE + sizeof(LICENSE_PREFIX) - 1;
 +    return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
 +}
 +
- static void command_queue_pop(AVFilterContext *filter)
++void ff_command_queue_pop(AVFilterContext *filter)
 +{
 +    AVFilterCommand *c = filter->command_queue;
 +    av_freep(&c->arg);
 +    av_freep(&c->command);
 +    filter->command_queue = c->next;
 +    av_free(c);
  }
  
- AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
- {
-     AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef));
-     if (!ret)
-         return NULL;
-     *ret = *ref;
-     if (ref->type == AVMEDIA_TYPE_VIDEO) {
-         ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps));
-         if (!ret->video) {
-             av_free(ret);
-             return NULL;
-         }
-         *ret->video = *ref->video;
-         ret->extended_data = ret->data;
-     } else if (ref->type == AVMEDIA_TYPE_AUDIO) {
-         ret->audio = av_malloc(sizeof(AVFilterBufferRefAudioProps));
-         if (!ret->audio) {
-             av_free(ret);
-             return NULL;
-         }
-         *ret->audio = *ref->audio;
-         if (ref->extended_data && ref->extended_data != ref->data) {
-             int nb_channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
-             if (!(ret->extended_data = av_malloc(sizeof(*ret->extended_data) *
-                                                  nb_channels))) {
-                 av_freep(&ret->audio);
-                 av_freep(&ret);
-                 return NULL;
-             }
-             memcpy(ret->extended_data, ref->extended_data,
-                    sizeof(*ret->extended_data) * nb_channels);
-         } else
-             ret->extended_data = ret->data;
-     }
-     ret->perms &= pmask;
-     ret->buf->refcount ++;
-     return ret;
- }
- static void free_pool(AVFilterPool *pool)
- {
-     int i;
-     av_assert0(pool->refcount > 0);
-     for (i = 0; i < POOL_SIZE; i++) {
-         if (pool->pic[i]) {
-             AVFilterBufferRef *picref = pool->pic[i];
-             /* free buffer: picrefs stored in the pool are not
-              * supposed to contain a free callback */
-             av_assert0(!picref->buf->refcount);
-             av_freep(&picref->buf->data[0]);
-             av_freep(&picref->buf);
-             av_freep(&picref->audio);
-             av_freep(&picref->video);
-             av_freep(&pool->pic[i]);
-             pool->count--;
-         }
-     }
-     pool->draining = 1;
-     if (!--pool->refcount) {
-         av_assert0(!pool->count);
-         av_free(pool);
-     }
- }
- static void store_in_pool(AVFilterBufferRef *ref)
- {
-     int i;
-     AVFilterPool *pool= ref->buf->priv;
-     av_assert0(ref->buf->data[0]);
-     av_assert0(pool->refcount>0);
-     if (pool->count == POOL_SIZE) {
-         AVFilterBufferRef *ref1 = pool->pic[0];
-         av_freep(&ref1->video);
-         av_freep(&ref1->audio);
-         av_freep(&ref1->buf->data[0]);
-         av_freep(&ref1->buf);
-         av_free(ref1);
-         memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1));
-         pool->count--;
-         pool->pic[POOL_SIZE-1] = NULL;
-     }
-     for (i = 0; i < POOL_SIZE; i++) {
-         if (!pool->pic[i]) {
-             pool->pic[i] = ref;
-             pool->count++;
-             break;
-         }
-     }
-     if (pool->draining) {
-         free_pool(pool);
-     } else
-         --pool->refcount;
- }
- void avfilter_unref_buffer(AVFilterBufferRef *ref)
- {
-     if (!ref)
-         return;
-     av_assert0(ref->buf->refcount > 0);
-     if (!(--ref->buf->refcount)) {
-         if (!ref->buf->free) {
-             store_in_pool(ref);
-             return;
-         }
-         ref->buf->free(ref->buf);
-     }
-     if (ref->extended_data != ref->data)
-         av_freep(&ref->extended_data);
-     av_freep(&ref->video);
-     av_freep(&ref->audio);
-     av_free(ref);
- }
- void avfilter_unref_bufferp(AVFilterBufferRef **ref)
- {
-     avfilter_unref_buffer(*ref);
-     *ref = NULL;
- }
  void avfilter_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
                           AVFilterPad **pads, AVFilterLink ***links,
                           AVFilterPad *newpad)
@@@ -234,17 -95,6 +107,17 @@@ int avfilter_link(AVFilterContext *src
      return 0;
  }
  
-         free_pool((*link)->pool);
 +void avfilter_link_free(AVFilterLink **link)
 +{
 +    if (!*link)
 +        return;
 +
 +    if ((*link)->pool)
++        ff_free_pool((*link)->pool);
 +
 +    av_freep(link);
 +}
 +
  int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
                             unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
  {
      if (link->out_formats)
          avfilter_formats_changeref(&link->out_formats,
                                     &filt->outputs[filt_dstpad_idx]->out_formats);
-     if (link->out_chlayouts)
-         avfilter_formats_changeref(&link->out_chlayouts,
-                                    &filt->outputs[filt_dstpad_idx]->out_chlayouts);
 -    if (link->out_samplerates)
 -        avfilter_formats_changeref(&link->out_samplerates,
 -                                   &filt->outputs[filt_dstpad_idx]->out_samplerates);
+     if (link->out_channel_layouts)
+         ff_channel_layouts_changeref(&link->out_channel_layouts,
+                                      &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
 +    if (link->out_packing)
 +        avfilter_formats_changeref(&link->out_packing,
 +                                   &filt->outputs[filt_dstpad_idx]->out_packing);
++    if (link->out_samplerates)
++        avfilter_formats_changeref(&link->out_samplerates,
++                                   &filt->outputs[filt_dstpad_idx]->out_samplerates);
  
      return 0;
  }
@@@ -309,56 -155,19 +185,58 @@@ int avfilter_config_links(AVFilterConte
              if ((ret = avfilter_config_links(link->src)) < 0)
                  return ret;
  
 -            if (!(config_link = link->srcpad->config_props))
 -                config_link  = avfilter_default_config_output_link;
 -            if ((ret = config_link(link)) < 0)
 +            if (!(config_link = link->srcpad->config_props)) {
 +                if (link->src->input_count != 1) {
 +                    av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
 +                                                    "with more than one input "
 +                                                    "must set config_props() "
 +                                                    "callbacks on all outputs\n");
 +                    return AVERROR(EINVAL);
 +                }
 +            } else if ((ret = config_link(link)) < 0)
                  return ret;
  
 -            if (link->time_base.num == 0 && link->time_base.den == 0)
 -                link->time_base = link->src && link->src->input_count ?
 -                    link->src->inputs[0]->time_base : AV_TIME_BASE_Q;
 -
 -            if (link->sample_aspect_ratio.num == 0 && link->sample_aspect_ratio.den == 0)
 -                link->sample_aspect_ratio = link->src->input_count ?
 -                    link->src->inputs[0]->sample_aspect_ratio : (AVRational){1,1};
 +            switch (link->type) {
 +            case AVMEDIA_TYPE_VIDEO:
 +                if (!link->time_base.num && !link->time_base.den)
 +                    link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
 +
 +                if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
 +                    link->sample_aspect_ratio = inlink ?
 +                        inlink->sample_aspect_ratio : (AVRational){1,1};
 +
++#if 1
 +                if (inlink) {
 +                    if (!link->w)
 +                        link->w = inlink->w;
 +                    if (!link->h)
 +                        link->h = inlink->h;
 +                } else if (!link->w || !link->h) {
 +                    av_log(link->src, AV_LOG_ERROR,
 +                           "Video source filters must set their output link's "
 +                           "width and height\n");
 +                    return AVERROR(EINVAL);
 +                }
 +                break;
 +
 +            case AVMEDIA_TYPE_AUDIO:
 +                if (inlink) {
 +                    if (!link->sample_rate)
 +                        link->sample_rate = inlink->sample_rate;
 +                    if (!link->time_base.num && !link->time_base.den)
 +                        link->time_base = inlink->time_base;
 +                } else if (!link->sample_rate) {
 +                    av_log(link->src, AV_LOG_ERROR,
 +                           "Audio source filters must set their output link's "
 +                           "sample_rate\n");
 +                    return AVERROR(EINVAL);
 +                }
 +
 +                if (!link->time_base.num && !link->time_base.den)
 +                    link->time_base = (AVRational) {1, link->sample_rate};
 +            }
  
++#endif
              if ((config_link = link->dstpad->config_props))
                  if ((ret = config_link(link)) < 0)
                      return ret;
      return 0;
  }
  
 -#ifdef DEBUG
--static char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms)
--{
--    snprintf(buf, buf_size, "%s%s%s%s%s%s",
--             perms & AV_PERM_READ      ? "r" : "",
--             perms & AV_PERM_WRITE     ? "w" : "",
--             perms & AV_PERM_PRESERVE  ? "p" : "",
--             perms & AV_PERM_REUSE     ? "u" : "",
--             perms & AV_PERM_REUSE2    ? "U" : "",
--             perms & AV_PERM_NEG_LINESIZES ? "n" : "");
--    return buf;
- }
- static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
- {
-     av_unused char buf[16];
-     av_dlog(ctx,
-             "ref[%p buf:%p refcount:%d perms:%s data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
-             ref, ref->buf, ref->buf->refcount, ff_get_ref_perms_string(buf, sizeof(buf), ref->perms), ref->data[0],
-             ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
-             ref->pts, ref->pos);
-     if (ref->video) {
-         av_dlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
-                 ref->video->sample_aspect_ratio.num, ref->video->sample_aspect_ratio.den,
-                 ref->video->w, ref->video->h,
-                 !ref->video->interlaced     ? 'P' :         /* Progressive  */
-                 ref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
-                 ref->video->key_frame,
-                 av_get_picture_type_char(ref->video->pict_type));
-     }
-     if (ref->audio) {
-         av_dlog(ctx, " cl:%"PRId64"d n:%d r:%d p:%d",
-                 ref->audio->channel_layout,
-                 ref->audio->nb_samples,
-                 ref->audio->sample_rate,
-                 ref->audio->planar);
-     }
-     av_dlog(ctx, "]%s", end ? "\n" : "");
--}
 -#endif
--
  void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
  {
      if (link->type == AVMEDIA_TYPE_VIDEO) {
@@@ -529,128 -246,7 +301,27 @@@ int avfilter_poll_frame(AVFilterLink *l
      return min;
  }
  
 -#define MAX_REGISTERED_AVFILTERS_NB 64
 +void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
 +{
 +    if (pts == AV_NOPTS_VALUE)
 +        return;
 +    link->current_pts =  pts; /* TODO use duration */
 +    if (link->graph && link->age_index >= 0)
 +        ff_avfilter_graph_update_heap(link->graph, link);
 +}
 +
- /* XXX: should we do the duplicating of the picture ref here, instead of
-  * forcing the source filter to do it? */
- void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
- {
-     void (*start_frame)(AVFilterLink *, AVFilterBufferRef *);
-     AVFilterPad *dst = link->dstpad;
-     int perms = picref->perms;
-     AVFilterCommand *cmd= link->dst->command_queue;
-     FF_DPRINTF_START(NULL, start_frame); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " "); ff_dlog_ref(NULL, picref, 1);
-     if (!(start_frame = dst->start_frame))
-         start_frame = avfilter_default_start_frame;
-     if (picref->linesize[0] < 0)
-         perms |= AV_PERM_NEG_LINESIZES;
-     /* prepare to copy the picture if it has insufficient permissions */
-     if ((dst->min_perms & perms) != dst->min_perms || dst->rej_perms & perms) {
-         av_log(link->dst, AV_LOG_DEBUG,
-                 "frame copy needed (have perms %x, need %x, reject %x)\n",
-                 picref->perms,
-                 link->dstpad->min_perms, link->dstpad->rej_perms);
-         link->cur_buf = avfilter_get_video_buffer(link, dst->min_perms, link->w, link->h);
-         link->src_buf = picref;
-         avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf);
-     }
-     else
-         link->cur_buf = picref;
-     while(cmd && cmd->time <= picref->pts * av_q2d(link->time_base)){
-         av_log(link->dst, AV_LOG_DEBUG,
-                "Processing command time:%f command:%s arg:%s\n",
-                cmd->time, cmd->command, cmd->arg);
-         avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
-         command_queue_pop(link->dst);
-         cmd= link->dst->command_queue;
-     }
-     start_frame(link, link->cur_buf);
-     ff_update_link_current_pts(link, link->cur_buf->pts);
- }
- void avfilter_end_frame(AVFilterLink *link)
- {
-     void (*end_frame)(AVFilterLink *);
-     if (!(end_frame = link->dstpad->end_frame))
-         end_frame = avfilter_default_end_frame;
-     end_frame(link);
-     /* unreference the source picture if we're feeding the destination filter
-      * a copied version dues to permission issues */
-     if (link->src_buf) {
-         avfilter_unref_buffer(link->src_buf);
-         link->src_buf = NULL;
-     }
- }
- void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
- {
-     uint8_t *src[4], *dst[4];
-     int i, j, vsub;
-     void (*draw_slice)(AVFilterLink *, int, int, int);
-     FF_DPRINTF_START(NULL, draw_slice); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " y:%d h:%d dir:%d\n", y, h, slice_dir);
-     /* copy the slice if needed for permission reasons */
-     if (link->src_buf) {
-         vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
-         for (i = 0; i < 4; i++) {
-             if (link->src_buf->data[i]) {
-                 src[i] = link->src_buf-> data[i] +
-                     (y >> (i==1 || i==2 ? vsub : 0)) * link->src_buf-> linesize[i];
-                 dst[i] = link->cur_buf->data[i] +
-                     (y >> (i==1 || i==2 ? vsub : 0)) * link->cur_buf->linesize[i];
-             } else
-                 src[i] = dst[i] = NULL;
-         }
-         for (i = 0; i < 4; i++) {
-             int planew =
-                 av_image_get_linesize(link->format, link->cur_buf->video->w, i);
-             if (!src[i]) continue;
-             for (j = 0; j < h >> (i==1 || i==2 ? vsub : 0); j++) {
-                 memcpy(dst[i], src[i], planew);
-                 src[i] += link->src_buf->linesize[i];
-                 dst[i] += link->cur_buf->linesize[i];
-             }
-         }
-     }
-     if (!(draw_slice = link->dstpad->draw_slice))
-         draw_slice = avfilter_default_draw_slice;
-     draw_slice(link, y, h, slice_dir);
- }
 +int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
 +{
 +    if (!strcmp(cmd, "ping")) {
 +        av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
 +        return 0;
 +    } else if (filter->filter->process_command) {
 +        return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
 +    }
 +    return AVERROR(ENOSYS);
 +}
 +
 +#define MAX_REGISTERED_AVFILTERS_NB 128
  
  static AVFilter *registered_avfilters[MAX_REGISTERED_AVFILTERS_NB + 1];
  
@@@ -788,8 -376,12 +459,12 @@@ void avfilter_free(AVFilterContext *fil
                  link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
              avfilter_formats_unref(&link->in_formats);
              avfilter_formats_unref(&link->out_formats);
+             avfilter_formats_unref(&link->in_samplerates);
+             avfilter_formats_unref(&link->out_samplerates);
+             ff_channel_layouts_unref(&link->in_channel_layouts);
+             ff_channel_layouts_unref(&link->out_channel_layouts);
          }
 -        av_freep(&link);
 +        avfilter_link_free(&link);
      }
      for (i = 0; i < filter->output_count; i++) {
          if ((link = filter->outputs[i])) {
                  link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
              avfilter_formats_unref(&link->in_formats);
              avfilter_formats_unref(&link->out_formats);
+             avfilter_formats_unref(&link->in_samplerates);
+             avfilter_formats_unref(&link->out_samplerates);
+             ff_channel_layouts_unref(&link->in_channel_layouts);
+             ff_channel_layouts_unref(&link->out_channel_layouts);
          }
 -        av_freep(&link);
 +        avfilter_link_free(&link);
      }
  
      av_freep(&filter->name);
      av_freep(&filter->inputs);
      av_freep(&filter->outputs);
      av_freep(&filter->priv);
-         command_queue_pop(filter);
 +    while (filter->command_queue) {
++        ff_command_queue_pop(filter);
 +    }
      av_free(filter);
  }
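Editorial note: a small illustration (assumed usage, not part of the patch) of the avfilter_process_command() entry point added above: every filter answers the built-in "ping" command, anything else is forwarded to the filter's own process_command callback.

    /* Sketch only: 'filt' is assumed to be an already-initialized AVFilterContext. */
    #include "libavfilter/avfilter.h"

    static void ping_filter(AVFilterContext *filt)
    {
        char res[256] = { 0 };

        if (avfilter_process_command(filt, "ping", NULL, res, sizeof(res), 0) >= 0)
            av_log(NULL, AV_LOG_INFO, "%s", res); /* e.g. "pong from:volume myvolume" */
    }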
  
@@@ -258,8 -238,8 +258,8 @@@ void avfilter_unref_bufferp(AVFilterBuf
   * pointer to each of the pointers to itself.
   */
  typedef struct AVFilterFormats {
-     unsigned format_count;      ///< number of formats
 +    int64_t *formats;           ///< list of media formats
+     unsigned format_count;      ///< number of formats
 -    int *formats;               ///< list of media formats
  
      unsigned refcount;          ///< number of references to this list
      struct AVFilterFormats ***refs; ///< references to this list
@@@ -284,35 -262,12 +283,30 @@@ AVFilterFormats *avfilter_make_format_l
   * @return a non negative value in case of success, or a negative
   * value corresponding to an AVERROR code in case of error
   */
 -int avfilter_add_format(AVFilterFormats **avff, int fmt);
 +int avfilter_add_format(AVFilterFormats **avff, int64_t fmt);
  
 +#if FF_API_OLD_ALL_FORMATS_API
  /**
 - * Return a list of all formats supported by Libav for the given media type.
 + * @deprecated Use avfilter_make_all_formats() instead.
   */
 +attribute_deprecated
  AVFilterFormats *avfilter_all_formats(enum AVMediaType type);
-  * Return a list of all channel layouts supported by FFmpeg.
-  */
- AVFilterFormats *avfilter_make_all_channel_layouts(void);
- /**
 +#endif
 +
 +/**
 + * Return a list of all formats supported by FFmpeg for the given media type.
 + */
 +AVFilterFormats *avfilter_make_all_formats(enum AVMediaType type);
 +
 +/**
 + * A list of all channel layouts supported by libavfilter.
 + */
 +extern const int64_t avfilter_all_channel_layouts[];
 +
 +/**
 + * Return a list of all audio packing formats.
 + */
 +AVFilterFormats *avfilter_make_all_packing_formats(void);
  
  /**
   * Return a format list which contains the intersection of the formats of
@@@ -515,16 -467,12 +509,17 @@@ void avfilter_default_end_frame(AVFilte
  AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link,
                                                       int perms, int w, int h);
  
 +
  /**
 - * A helper for query_formats() which sets all links to the same list of
 - * formats. If there are no links hooked to this filter, the list of formats is
 - * freed.
 + * Helpers for query_formats() which set all links to the same list of
 + * formats/layouts. If there are no links hooked to this filter, the list
 + * of formats is freed.
   */
+ void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats);
 +void avfilter_set_common_pixel_formats(AVFilterContext *ctx, AVFilterFormats *formats);
 +void avfilter_set_common_sample_formats(AVFilterContext *ctx, AVFilterFormats *formats);
 +void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *formats);
 +void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats);
  
  /** Default handler for query_formats() */
  int avfilter_default_query_formats(AVFilterContext *ctx);
@@@ -674,11 -598,6 +669,9 @@@ struct AVFilterLink 
      AVFilterFormats *in_formats;
      AVFilterFormats *out_formats;
  
-     AVFilterFormats *in_chlayouts;
-     AVFilterFormats *out_chlayouts;
 +    AVFilterFormats *in_packing;
 +    AVFilterFormats *out_packing;
 +
      /**
       * The buffer reference currently being sent across the link by the source
       * filter. This is used internally by the filter system to allow
       */
      AVRational time_base;
  
-     /**
-      * Private fields
-      *
-      * The following fields are for internal use only.
-      * Their type, offset, number and semantic can change without notice.
 +    struct AVFilterPool *pool;
 +
 +    /**
 +     * Graph the filter belongs to.
 +     */
 +    struct AVFilterGraph *graph;
 +
 +    /**
 +     * Current timestamp of the link, as defined by the most recent
 +     * frame(s), in AV_TIME_BASE units.
 +     */
 +    int64_t current_pts;
 +
+     /*****************************************************************
+      * All fields below this line are not part of the public API. They
+      * may not be used outside of libavfilter and can be changed and
+      * removed at will.
+      * New public fields should be added right above.
+      *****************************************************************
       */
      /**
 +     * Index in the age array.
 +     */
 +    int age_index;
++
++    /**
+      * Lists of channel layouts and sample rates used for automatic
+      * negotiation.
+      */
+     AVFilterFormats  *in_samplerates;
+     AVFilterFormats *out_samplerates;
+     struct AVFilterChannelLayouts  *in_channel_layouts;
+     struct AVFilterChannelLayouts *out_channel_layouts;
  };
  
  /**
  #include <ctype.h>
  #include <string.h>
  
 +#include "libavutil/audioconvert.h"
 +#include "libavutil/avassert.h"
 +#include "libavutil/pixdesc.h"
  #include "avfilter.h"
  #include "avfiltergraph.h"
+ #include "formats.h"
  #include "internal.h"
  
+ #include "libavutil/audioconvert.h"
  #include "libavutil/log.h"
  
  static const AVClass filtergraph_class = {
@@@ -158,66 -151,10 +160,69 @@@ AVFilterContext *avfilter_graph_get_fil
      return NULL;
  }
  
-            (!avfilter_merge_formats(link->in_chlayouts, link->out_chlayouts) ||
 +static int insert_conv_filter(AVFilterGraph *graph, AVFilterLink *link,
 +                              const char *filt_name, const char *filt_args)
 +{
 +    static int auto_count = 0;
 +    int ret;
 +    char inst_name[32];
 +    AVFilterContext *filt_ctx;
 +
 +    if (graph->disable_auto_convert) {
 +        av_log(NULL, AV_LOG_ERROR,
 +               "The filters '%s' and '%s' do not have a common format "
 +               "and automatic conversion is disabled.\n",
 +               link->src->name, link->dst->name);
 +        return AVERROR(EINVAL);
 +    }
 +
 +    snprintf(inst_name, sizeof(inst_name), "auto-inserted %s %d",
 +            filt_name, auto_count++);
 +
 +    if ((ret = avfilter_graph_create_filter(&filt_ctx,
 +                                            avfilter_get_by_name(filt_name),
 +                                            inst_name, filt_args, NULL, graph)) < 0)
 +        return ret;
 +    if ((ret = avfilter_insert_filter(link, filt_ctx, 0, 0)) < 0)
 +        return ret;
 +
 +    filt_ctx->filter->query_formats(filt_ctx);
 +
 +    if ( ((link = filt_ctx-> inputs[0]) &&
 +           !avfilter_merge_formats(link->in_formats, link->out_formats)) ||
 +         ((link = filt_ctx->outputs[0]) &&
 +           !avfilter_merge_formats(link->in_formats, link->out_formats))
 +       ) {
 +        av_log(NULL, AV_LOG_ERROR,
 +               "Impossible to convert between the formats supported by the filter "
 +               "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
 +        return AVERROR(EINVAL);
 +    }
 +
 +    if (link->type == AVMEDIA_TYPE_AUDIO &&
 +         (((link = filt_ctx-> inputs[0]) &&
-            (!avfilter_merge_formats(link->in_chlayouts, link->out_chlayouts) ||
++           (!ff_merge_channel_layouts(link->in_channel_layouts, link->out_channel_layouts) ||
 +            !avfilter_merge_formats(link->in_packing,   link->out_packing))) ||
 +         ((link = filt_ctx->outputs[0]) &&
++           (!ff_merge_channel_layouts(link->in_channel_layouts, link->out_channel_layouts) ||
 +            !avfilter_merge_formats(link->in_packing,   link->out_packing))))
 +       ) {
 +        av_log(NULL, AV_LOG_ERROR,
 +               "Impossible to convert between the channel layouts/packing formats supported by the filter "
 +               "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
 +        return AVERROR(EINVAL);
 +    }
 +
 +    return 0;
 +}
 +
  static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
  {
      int i, j, ret;
-     AVFilterFormats *formats, *chlayouts, *packing;
 +    char filt_args[128];
++    AVFilterFormats *formats, *packing;
++    AVFilterChannelLayouts *chlayouts;
++    AVFilterFormats *samplerates;
+     int scaler_count = 0, resampler_count = 0;
  
      /* ask all the sub-filters for their supported media formats */
      for (i = 0; i < graph->filter_count; i++) {
  
          for (j = 0; j < filter->input_count; j++) {
              AVFilterLink *link = filter->inputs[j];
-                 if (!link->in_chlayouts || !link->out_chlayouts ||
++#if 0
 +            if (!link) continue;
 +
 +            if (!link->in_formats || !link->out_formats)
 +                return AVERROR(EINVAL);
 +
 +            if (link->type == AVMEDIA_TYPE_VIDEO &&
 +                !avfilter_merge_formats(link->in_formats, link->out_formats)) {
 +
 +                /* couldn't merge format lists, auto-insert scale filter */
 +                snprintf(filt_args, sizeof(filt_args), "0:0:%s",
 +                         graph->scale_sws_opts);
 +                if (ret = insert_conv_filter(graph, link, "scale", filt_args))
 +                    return ret;
 +            }
 +            else if (link->type == AVMEDIA_TYPE_AUDIO) {
-                 chlayouts = avfilter_merge_formats(link->in_chlayouts, link->out_chlayouts);
++                if (!link->in_channel_layouts || !link->out_channel_layouts ||
 +                    !link->in_packing   || !link->out_packing)
 +                    return AVERROR(EINVAL);
 +
 +                /* Merge all three list before checking: that way, in all
 +                 * three categories, aconvert will use a common format
 +                 * whenever possible. */
 +                formats   = avfilter_merge_formats(link->in_formats,   link->out_formats);
-                 if (!formats || !chlayouts || !packing)
++                chlayouts   = ff_merge_channel_layouts(link->in_channel_layouts  , link->out_channel_layouts);
++                samplerates = ff_merge_samplerates    (link->in_samplerates, link->out_samplerates);
 +                packing   = avfilter_merge_formats(link->in_packing,   link->out_packing);
++
++                if (!formats || !chlayouts || !packing || !samplerates)
 +                    if (ret = insert_conv_filter(graph, link, "aconvert", NULL))
 +                       return ret;
++#else
+             int convert_needed = 0;
+             if (!link)
+                 continue;
+             if (link->in_formats != link->out_formats &&
+                 !avfilter_merge_formats(link->in_formats,
+                                         link->out_formats))
+                 convert_needed = 1;
+             if (link->type == AVMEDIA_TYPE_AUDIO) {
+                 if (link->in_channel_layouts != link->out_channel_layouts &&
+                     !ff_merge_channel_layouts(link->in_channel_layouts,
+                                               link->out_channel_layouts))
+                     convert_needed = 1;
+                 if (link->in_samplerates != link->out_samplerates &&
+                     !ff_merge_samplerates(link->in_samplerates,
+                                           link->out_samplerates))
+                     convert_needed = 1;
+             }
+             if (convert_needed) {
+                 AVFilterContext *convert;
+                 AVFilter *filter;
+                 AVFilterLink *inlink, *outlink;
+                 char scale_args[256];
+                 char inst_name[30];
+                 /* couldn't merge format lists. auto-insert conversion filter */
+                 switch (link->type) {
+                 case AVMEDIA_TYPE_VIDEO:
+                     snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
+                              scaler_count++);
+                     snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
+                     if ((ret = avfilter_graph_create_filter(&convert,
+                                                             avfilter_get_by_name("scale"),
+                                                             inst_name, scale_args, NULL,
+                                                             graph)) < 0)
+                         return ret;
+                     break;
+                 case AVMEDIA_TYPE_AUDIO:
+                     if (!(filter = avfilter_get_by_name("resample"))) {
+                         av_log(log_ctx, AV_LOG_ERROR, "'resample' filter "
+                                "not present, cannot convert audio formats.\n");
+                         return AVERROR(EINVAL);
+                     }
+                     snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
+                              resampler_count++);
+                     if ((ret = avfilter_graph_create_filter(&convert,
+                                                             avfilter_get_by_name("resample"),
+                                                             inst_name, NULL, NULL, graph)) < 0)
+                         return ret;
+                     break;
+                 default:
+                     return AVERROR(EINVAL);
+                 }
+                 if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
+                     return ret;
+                 convert->filter->query_formats(convert);
+                 inlink  = convert->inputs[0];
+                 outlink = convert->outputs[0];
+                 if (!avfilter_merge_formats( inlink->in_formats,  inlink->out_formats) ||
+                     !avfilter_merge_formats(outlink->in_formats, outlink->out_formats))
+                     ret |= AVERROR(ENOSYS);
+                 if (inlink->type == AVMEDIA_TYPE_AUDIO &&
+                     (!ff_merge_samplerates(inlink->in_samplerates,
+                                            inlink->out_samplerates) ||
+                      !ff_merge_channel_layouts(inlink->in_channel_layouts,
+                                                inlink->out_channel_layouts)))
+                     ret |= AVERROR(ENOSYS);
+                 if (outlink->type == AVMEDIA_TYPE_AUDIO &&
+                     (!ff_merge_samplerates(outlink->in_samplerates,
+                                            outlink->out_samplerates) ||
+                      !ff_merge_channel_layouts(outlink->in_channel_layouts,
+                                                outlink->out_channel_layouts)))
+                     ret |= AVERROR(ENOSYS);
+                 if (ret < 0) {
+                     av_log(log_ctx, AV_LOG_ERROR,
+                            "Impossible to convert between the formats supported by the filter "
+                            "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
+                     return ret;
+                 }
++#endif
              }
          }
      }
      return 0;
  }
  
- static void pick_format(AVFilterLink *link, AVFilterLink *ref)
 -static int pick_format(AVFilterLink *link)
++static int pick_format(AVFilterLink *link, AVFilterLink *ref)
  {
      if (!link || !link->in_formats)
-         return;
+         return 0;
  
 +    if (link->type == AVMEDIA_TYPE_VIDEO) {
 +        if (ref && ref->type == AVMEDIA_TYPE_VIDEO) {
 +            int has_alpha = av_pix_fmt_descriptors[ref->format].nb_components % 2 == 0;
 +            enum PixelFormat best = PIX_FMT_NONE;
 +            int i;
 +            for (i = 0; i < link->in_formats->format_count; i++) {
 +                enum PixelFormat p = link->in_formats->formats[i];
 +                best = avcodec_find_best_pix_fmt2(best, p, ref->format, has_alpha, NULL);
 +            }
 +            }
 +            link->in_formats->formats[0] = best;
 +        }
 +    }
 +
      link->in_formats->format_count = 1;
      link->format = link->in_formats->formats[0];
-     avfilter_formats_unref(&link->in_formats);
-     avfilter_formats_unref(&link->out_formats);
  
      if (link->type == AVMEDIA_TYPE_AUDIO) {
-         link->in_chlayouts->format_count = 1;
-         link->channel_layout = link->in_chlayouts->formats[0];
-         avfilter_formats_unref(&link->in_chlayouts);
-         avfilter_formats_unref(&link->out_chlayouts);
-         link->in_packing->format_count = 1;
-         link->planar = link->in_packing->formats[0] == AVFILTER_PLANAR;
-         avfilter_formats_unref(&link->in_packing);
-         avfilter_formats_unref(&link->out_packing);
+         if (!link->in_samplerates->format_count) {
+             av_log(link->src, AV_LOG_ERROR, "Cannot select sample rate for"
+                    " the link between filters %s and %s.\n", link->src->name,
+                    link->dst->name);
+             return AVERROR(EINVAL);
+         }
+         link->in_samplerates->format_count = 1;
+         link->sample_rate = link->in_samplerates->formats[0];
+         if (!link->in_channel_layouts->nb_channel_layouts) {
+             av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
+                    "the link between filters %s and %s.\n", link->src->name,
+                    link->dst->name);
+             return AVERROR(EINVAL);
+         }
+         link->in_channel_layouts->nb_channel_layouts = 1;
+         link->channel_layout = link->in_channel_layouts->channel_layouts[0];
      }
+     avfilter_formats_unref(&link->in_formats);
+     avfilter_formats_unref(&link->out_formats);
+     avfilter_formats_unref(&link->in_samplerates);
+     avfilter_formats_unref(&link->out_samplerates);
+     ff_channel_layouts_unref(&link->in_channel_layouts);
+     ff_channel_layouts_unref(&link->out_channel_layouts);
+     return 0;
  }
  
+ #define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format) \
+ do {                                                                   \
+     for (i = 0; i < filter->input_count; i++) {                        \
+         AVFilterLink *link = filter->inputs[i];                        \
+         fmt_type fmt;                                                  \
+                                                                        \
+         if (!link->out_ ## list || link->out_ ## list->nb != 1)        \
+             continue;                                                  \
+         fmt = link->out_ ## list->var[0];                              \
+                                                                        \
+         for (j = 0; j < filter->output_count; j++) {                   \
+             AVFilterLink *out_link = filter->outputs[j];               \
+             list_type *fmts;                                           \
+                                                                        \
+             if (link->type != out_link->type ||                        \
+                 out_link->in_ ## list->nb == 1)                        \
+                 continue;                                              \
+             fmts = out_link->in_ ## list;                              \
+                                                                        \
+             if (!out_link->in_ ## list->nb) {                          \
+                 add_format(&out_link->in_ ##list, fmt);                \
+                 break;                                                 \
+             }                                                          \
+                                                                        \
+             for (k = 0; k < out_link->in_ ## list->nb; k++)            \
+                 if (fmts->var[k] == fmt) {                             \
+                     fmts->var[0]  = fmt;                               \
+                     fmts->nb = 1;                                      \
+                     ret = 1;                                           \
+                     break;                                             \
+                 }                                                      \
+         }                                                              \
+     }                                                                  \
+ } while (0)
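Editorial note: in plain terms, the REDUCE_FORMATS macro above propagates a decision already made on an input link to the filter's outputs. For example (formats illustrative only): if an input link has been narrowed to the single sample format s16 and an output link of the same media type still lists {s16, flt}, the output list is collapsed to just s16 and ret is set so that another reduction pass runs; if the output list was still empty, s16 is simply added.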
  static int reduce_formats_on_filter(AVFilterContext *filter)
  {
      int i, j, k, ret = 0;
@@@ -347,51 -361,121 +475,153 @@@ static void reduce_formats(AVFilterGrap
      } while (reduced);
  }
  
- static void pick_formats(AVFilterGraph *graph)
+ static void swap_samplerates_on_filter(AVFilterContext *filter)
  {
+     AVFilterLink *link = NULL;
+     int sample_rate;
      int i, j;
+     for (i = 0; i < filter->input_count; i++) {
+         link = filter->inputs[i];
+         if (link->type == AVMEDIA_TYPE_AUDIO &&
+             link->out_samplerates->format_count == 1)
+             break;
+     }
+     if (i == filter->input_count)
+         return;
+     sample_rate = link->out_samplerates->formats[0];
+     for (i = 0; i < filter->output_count; i++) {
+         AVFilterLink *outlink = filter->outputs[i];
+         int best_idx, best_diff = INT_MAX;
+         if (outlink->type != AVMEDIA_TYPE_AUDIO ||
+             outlink->in_samplerates->format_count < 2)
+             continue;
+         for (j = 0; j < outlink->in_samplerates->format_count; j++) {
+             int diff = abs(sample_rate - outlink->in_samplerates->formats[j]);
+             if (diff < best_diff) {
+                 best_diff = diff;
+                 best_idx  = j;
+             }
+         }
+         FFSWAP(int, outlink->in_samplerates->formats[0],
+                outlink->in_samplerates->formats[best_idx]);
+     }
+ }
+ static void swap_samplerates(AVFilterGraph *graph)
+ {
+     int i;
+     for (i = 0; i < graph->filter_count; i++)
+         swap_samplerates_on_filter(graph->filters[i]);
+ }
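Editorial note: to make the nearest-rate heuristic above concrete (illustrative numbers only): if one of the filter's input links is already fixed to 44100 Hz and an output link still offers {8000, 32000, 48000}, the absolute differences are 36100, 12100 and 3900, so swap_samplerates_on_filter() swaps 48000 to index 0 and the later pick_format() call selects it.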
+ static void swap_channel_layouts_on_filter(AVFilterContext *filter)
+ {
+     AVFilterLink *link = NULL;
+     uint64_t chlayout;
+     int i, j;
+     for (i = 0; i < filter->input_count; i++) {
+         link = filter->inputs[i];
+         if (link->type == AVMEDIA_TYPE_AUDIO &&
+             link->out_channel_layouts->nb_channel_layouts == 1)
+             break;
+     }
+     if (i == filter->input_count)
+         return;
+     chlayout = link->out_channel_layouts->channel_layouts[0];
+     for (i = 0; i < filter->output_count; i++) {
+         AVFilterLink *outlink = filter->outputs[i];
+         int best_idx, best_score = INT_MIN;
+         if (outlink->type != AVMEDIA_TYPE_AUDIO ||
+             outlink->in_channel_layouts->nb_channel_layouts < 2)
+             continue;
+         for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
+             uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
+             int matched_channels  = av_get_channel_layout_nb_channels(chlayout &
+                                                                       out_chlayout);
+             int extra_channels     = av_get_channel_layout_nb_channels(out_chlayout &
+                                                                        (~chlayout));
+             int score = matched_channels - extra_channels;
+             if (score > best_score) {
+                 best_score = score;
+                 best_idx   = j;
+             }
+         }
+         FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
+                outlink->in_channel_layouts->channel_layouts[best_idx]);
+     }
+ }
+ static void swap_channel_layouts(AVFilterGraph *graph)
+ {
+     int i;
+     for (i = 0; i < graph->filter_count; i++)
+         swap_channel_layouts_on_filter(graph->filters[i]);
+ }
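Editorial note: the channel-layout variant works the same way, scoring each candidate as matched_channels minus extra_channels (layouts illustrative only): for an input already negotiated to stereo (FL+FR) and an output offering mono, stereo and 5.1, the scores are -1, 2 and -2 respectively, so stereo is swapped to the front and wins.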
+ static int pick_formats(AVFilterGraph *graph)
+ {
+     int i, j, ret;
 +    int change;
 +
 +    do {
 +        change = 0;
 +        for (i = 0; i < graph->filter_count; i++) {
 +            AVFilterContext *filter = graph->filters[i];
 +            if (filter->input_count) {
 +                for (j = 0; j < filter->input_count; j++) {
 +                    if (filter->inputs[j]->in_formats && filter->inputs[j]->in_formats->format_count == 1) {
 +                        pick_format(filter->inputs[j], NULL);
 +                        change = 1;
 +                    }
 +                }
 +            }
 +            if (filter->output_count) {
 +                for (j = 0; j < filter->output_count; j++) {
 +                    if (filter->outputs[j]->in_formats && filter->outputs[j]->in_formats->format_count == 1) {
 +                        pick_format(filter->outputs[j], NULL);
 +                        change = 1;
 +                    }
 +                }
 +            }
 +            if (filter->input_count && filter->output_count && filter->inputs[0]->format >= 0) {
 +                for (j = 0; j < filter->output_count; j++) {
 +                    if (filter->outputs[j]->format < 0) {
 +                        pick_format(filter->outputs[j], filter->inputs[0]);
 +                        change = 1;
 +                    }
 +                }
 +            }
 +        }
 +    } while (change);
  
      for (i = 0; i < graph->filter_count; i++) {
          AVFilterContext *filter = graph->filters[i];
-         if (1) {
-             for (j = 0; j < filter->input_count; j++)
-                 pick_format(filter->inputs[j], NULL);
-             for (j = 0; j < filter->output_count; j++)
-                 pick_format(filter->outputs[j], NULL);
-         }
+         for (j = 0; j < filter->input_count; j++)
 -            if ((ret = pick_format(filter->inputs[j])) < 0)
++            if ((ret = pick_format(filter->inputs[j], NULL)) < 0)
+                 return ret;
+         for (j = 0; j < filter->output_count; j++)
 -            if ((ret = pick_format(filter->outputs[j])) < 0)
++            if ((ret = pick_format(filter->outputs[j], NULL)) < 0)
+                 return ret;
      }
+     return 0;
  }
  
  int ff_avfilter_graph_config_formats(AVFilterGraph *graph, AVClass *log_ctx)
index 0000000,be0da26..32431c6
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,168 +1,244 @@@
 - * This file is part of Libav.
+ /*
 - * Libav is free software; you can redistribute it and/or
++ * This file is part of FFmpeg.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * License along with Libav; if not, write to the Free Software
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 -/* TODO: buffer pool.  see comment for avfilter_default_get_video_buffer() */
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ #include "libavutil/audioconvert.h"
++#include "libavutil/avassert.h"
+ #include "libavcodec/avcodec.h"
+ #include "avfilter.h"
+ #include "internal.h"
++#include "avcodec.h"
 -        if (ref->extended_data != ref->data) {
+ void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
+ {
+     if (ptr->extended_data != ptr->data)
+         av_freep(&ptr->extended_data);
+     av_free(ptr->data[0]);
+     av_free(ptr);
+ }
+ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
+ {
+     AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef));
+     if (!ret)
+         return NULL;
+     *ret = *ref;
+     if (ref->type == AVMEDIA_TYPE_VIDEO) {
+         ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps));
+         if (!ret->video) {
+             av_free(ret);
+             return NULL;
+         }
+         *ret->video = *ref->video;
+         ret->extended_data = ret->data;
+     } else if (ref->type == AVMEDIA_TYPE_AUDIO) {
+         ret->audio = av_malloc(sizeof(AVFilterBufferRefAudioProps));
+         if (!ret->audio) {
+             av_free(ret);
+             return NULL;
+         }
+         *ret->audio = *ref->audio;
 -    if (!(--ref->buf->refcount))
++        if (ref->extended_data && ref->extended_data != ref->data) {
+             int nb_channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
+             if (!(ret->extended_data = av_malloc(sizeof(*ret->extended_data) *
+                                                  nb_channels))) {
+                 av_freep(&ret->audio);
+                 av_freep(&ret);
+                 return NULL;
+             }
+             memcpy(ret->extended_data, ref->extended_data,
+                    sizeof(*ret->extended_data) * nb_channels);
+         } else
+             ret->extended_data = ret->data;
+     }
+     ret->perms &= pmask;
+     ret->buf->refcount ++;
+     return ret;
+ }
++void ff_free_pool(AVFilterPool *pool)
++{
++    int i;
++
++    av_assert0(pool->refcount > 0);
++
++    for (i = 0; i < POOL_SIZE; i++) {
++        if (pool->pic[i]) {
++            AVFilterBufferRef *picref = pool->pic[i];
++            /* free buffer: picrefs stored in the pool are not
++             * supposed to contain a free callback */
++            av_assert0(!picref->buf->refcount);
++            av_freep(&picref->buf->data[0]);
++            av_freep(&picref->buf);
++
++            av_freep(&picref->audio);
++            av_freep(&picref->video);
++            av_freep(&pool->pic[i]);
++            pool->count--;
++        }
++    }
++    pool->draining = 1;
++
++    if (!--pool->refcount) {
++        av_assert0(!pool->count);
++        av_free(pool);
++    }
++}
++
++static void store_in_pool(AVFilterBufferRef *ref)
++{
++    int i;
++    AVFilterPool *pool= ref->buf->priv;
++
++    av_assert0(ref->buf->data[0]);
++    av_assert0(pool->refcount>0);
++
++    if (pool->count == POOL_SIZE) {
++        AVFilterBufferRef *ref1 = pool->pic[0];
++        av_freep(&ref1->video);
++        av_freep(&ref1->audio);
++        av_freep(&ref1->buf->data[0]);
++        av_freep(&ref1->buf);
++        av_free(ref1);
++        memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1));
++        pool->count--;
++        pool->pic[POOL_SIZE-1] = NULL;
++    }
++
++    for (i = 0; i < POOL_SIZE; i++) {
++        if (!pool->pic[i]) {
++            pool->pic[i] = ref;
++            pool->count++;
++            break;
++        }
++    }
++    if (pool->draining) {
++        ff_free_pool(pool);
++    } else
++        --pool->refcount;
++}
++
+ void avfilter_unref_buffer(AVFilterBufferRef *ref)
+ {
+     if (!ref)
+         return;
 -    av_free(ref->video);
 -    av_free(ref->audio);
++    av_assert0(ref->buf->refcount > 0);
++    if (!(--ref->buf->refcount)) {
++        if (!ref->buf->free) {
++            store_in_pool(ref);
++            return;
++        }
+         ref->buf->free(ref->buf);
++    }
+     if (ref->extended_data != ref->data)
+         av_freep(&ref->extended_data);
 -        dst->video->pixel_aspect        = src->sample_aspect_ratio;
++    av_freep(&ref->video);
++    av_freep(&ref->audio);
+     av_free(ref);
+ }
++void avfilter_unref_bufferp(AVFilterBufferRef **ref)
++{
++    avfilter_unref_buffer(*ref);
++    *ref = NULL;
++}
++
+ int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
+ {
+     dst->pts    = src->pts;
++    dst->pos    = src->pkt_pos;
+     dst->format = src->format;
+     switch (dst->type) {
+     case AVMEDIA_TYPE_VIDEO:
+         dst->video->w                   = src->width;
+         dst->video->h                   = src->height;
 -        dst->sample_aspect_ratio = src->video->pixel_aspect;
++        dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
+         dst->video->interlaced          = src->interlaced_frame;
+         dst->video->top_field_first     = src->top_field_first;
+         dst->video->key_frame           = src->key_frame;
+         dst->video->pict_type           = src->pict_type;
+         break;
+     case AVMEDIA_TYPE_AUDIO:
+         dst->audio->sample_rate         = src->sample_rate;
+         dst->audio->channel_layout      = src->channel_layout;
+         break;
+     default:
+         return AVERROR(EINVAL);
+     }
+     return 0;
+ }
+ int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src)
+ {
+     int planes, nb_channels;
+     memcpy(dst->data, src->data, sizeof(dst->data));
+     memcpy(dst->linesize, src->linesize, sizeof(dst->linesize));
+     dst->pts     = src->pts;
+     dst->format  = src->format;
+     switch (src->type) {
+     case AVMEDIA_TYPE_VIDEO:
+         dst->width               = src->video->w;
+         dst->height              = src->video->h;
++        dst->sample_aspect_ratio = src->video->sample_aspect_ratio;
+         dst->interlaced_frame    = src->video->interlaced;
+         dst->top_field_first     = src->video->top_field_first;
+         dst->key_frame           = src->video->key_frame;
+         dst->pict_type           = src->video->pict_type;
+         break;
+     case AVMEDIA_TYPE_AUDIO:
+         nb_channels = av_get_channel_layout_nb_channels(src->audio->channel_layout);
+         planes      = av_sample_fmt_is_planar(src->format) ? nb_channels : 1;
+         if (planes > FF_ARRAY_ELEMS(dst->data)) {
+             dst->extended_data = av_mallocz(planes * sizeof(*dst->extended_data));
+             if (!dst->extended_data)
+                 return AVERROR(ENOMEM);
+             memcpy(dst->extended_data, src->extended_data,
+                    planes * sizeof(dst->extended_data));
+         } else
+             dst->extended_data = dst->data;
+         dst->sample_rate         = src->audio->sample_rate;
+         dst->channel_layout      = src->audio->channel_layout;
+         dst->nb_samples          = src->audio->nb_samples;
+         break;
+     default:
+         return AVERROR(EINVAL);
+     }
+     return 0;
+ }
+ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
+ {
+     // copy common properties
+     dst->pts             = src->pts;
+     dst->pos             = src->pos;
+     switch (src->type) {
+     case AVMEDIA_TYPE_VIDEO: *dst->video = *src->video; break;
+     case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break;
+     default: break;
+     }
+ }
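
A hedged sketch of the reference-counting contract implemented above: avfilter_ref_buffer() bumps buf->refcount and duplicates the per-media props, and every reference must later be released with avfilter_unref_buffer(), which recycles unreferenced buffers into the pool when no free callback is set. Names below are assumed for illustration:

    /* take a read-only reference on an existing buffer; the caller releases
     * it with avfilter_unref_buffer() when done */
    AVFilterBufferRef *keep_read_only_ref(AVFilterBufferRef *src)
    {
        return avfilter_ref_buffer(src, AV_PERM_READ);   /* NULL on alloc failure */
    }
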
@@@ -98,8 -117,68 +117,68 @@@ int av_buffersink_read(AVFilterContext 
      return 0;
  }
  
+ static int read_from_fifo(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
+                           int nb_samples)
+ {
+     BufferSinkContext *s = ctx->priv;
+     AVFilterLink   *link = ctx->inputs[0];
+     AVFilterBufferRef *buf;
+     if (!(buf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples)))
+         return AVERROR(ENOMEM);
+     av_audio_fifo_read(s->audio_fifo, (void**)buf->extended_data, nb_samples);
+     buf->pts = s->next_pts;
+     s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
+                                 link->time_base);
+     *pbuf = buf;
+     return 0;
+ }
+ int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
+                                int nb_samples)
+ {
+     BufferSinkContext *s = ctx->priv;
+     AVFilterLink   *link = ctx->inputs[0];
+     int ret = 0;
+     if (!s->audio_fifo) {
+         int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
+         if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples)))
+             return AVERROR(ENOMEM);
+     }
+     while (ret >= 0) {
+         AVFilterBufferRef *buf;
+         if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
+             return read_from_fifo(ctx, pbuf, nb_samples);
+         ret = av_buffersink_read(ctx, &buf);
+         if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo))
+             return read_from_fifo(ctx, pbuf, av_audio_fifo_size(s->audio_fifo));
+         else if (ret < 0)
+             return ret;
+         if (buf->pts != AV_NOPTS_VALUE) {
+             s->next_pts = buf->pts -
+                           av_rescale_q(av_audio_fifo_size(s->audio_fifo),
+                                        (AVRational){ 1, link->sample_rate },
+                                        link->time_base);
+         }
+         ret = av_audio_fifo_write(s->audio_fifo, (void**)buf->extended_data,
+                                   buf->audio->nb_samples);
+         avfilter_unref_buffer(buf);
+     }
+     return ret;
+ }
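
A usage sketch for the function above, assuming "sink" is the abuffersink instance of an already configured graph (variable names are illustrative):

    AVFilterBufferRef *samples;
    int ret;

    /* drain fixed 1024-sample chunks; the last chunk may be shorter at EOF */
    while ((ret = av_buffersink_read_samples(sink, &samples, 1024)) >= 0) {
        /* process samples->extended_data, samples->audio->nb_samples ... */
        avfilter_unref_buffer(samples);
    }
    /* ret ends up as AVERROR_EOF once both the fifo and the input are drained */
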
  AVFilter avfilter_vsink_buffer = {
 -    .name      = "buffersink",
 +    .name      = "buffersink_old",
      .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
      .priv_size = sizeof(BufferSinkContext),
      .init      = init,
                                    { .name = NULL }},
      .outputs   = (AVFilterPad[]) {{ .name = NULL }},
  };
 -    .name      = "abuffersink",
+ AVFilter avfilter_asink_abuffer = {
++    .name      = "abuffersink_old",
+     .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
+     .priv_size = sizeof(BufferSinkContext),
+     .init      = init,
+     .uninit    = uninit,
+     .inputs    = (AVFilterPad[]) {{ .name           = "default",
+                                     .type           = AVMEDIA_TYPE_AUDIO,
+                                     .filter_samples = filter_samples,
+                                     .min_perms      = AV_PERM_READ, },
+                                   { .name = NULL }},
+     .outputs   = (AVFilterPad[]) {{ .name = NULL }},
+ };
  #include "avfilter.h"
  
  /**
 + * Struct to use for initializing a buffersink context.
 + */
 +typedef struct {
 +    const enum PixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by PIX_FMT_NONE
 +} AVBufferSinkParams;
 +
 +/**
 + * Create an AVBufferSinkParams structure.
 + *
 + * Must be freed with av_free().
 + */
 +AVBufferSinkParams *av_buffersink_params_alloc(void);
 +
 +/**
 + * Struct to use for initializing an abuffersink context.
 + */
 +typedef struct {
 +    const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
 +    const int64_t *channel_layouts;         ///< list of allowed channel layouts, terminated by -1
 +    const int *packing_fmts;                ///< list of allowed packing formats
 +} AVABufferSinkParams;
 +
 +/**
 + * Create an AVABufferSinkParams structure.
 + *
 + * Must be freed with av_free().
 + */
 +AVABufferSinkParams *av_abuffersink_params_alloc(void);
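
A sketch of how a caller might constrain a video buffersink with these parameters; the surrounding graph-construction call is assumed and not shown here:

    /* restrict the sink to two pixel formats; the list must end with PIX_FMT_NONE */
    static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_RGB24,
                                                 PIX_FMT_NONE };
    AVBufferSinkParams *params = av_buffersink_params_alloc();
    params->pixel_fmts = pix_fmts;
    /* pass "params" as the opaque argument when instantiating the buffersink,
     * then release it with av_free(params) */
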
 +
 +/**
 + * Tell av_buffersink_get_buffer_ref() to read video/samples buffer
 + * reference, but not remove it from the buffer. This is useful if you
 + * need only to read a video/samples buffer, without fetching it.
 + */
 +#define AV_BUFFERSINK_FLAG_PEEK 1
 +
 +/**
 + * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
 + * If a frame is already buffered, it is read (and removed from the buffer),
 + * but if no frame is present, return AVERROR(EAGAIN).
 + */
 +#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
 +
 +/**
 + * Get audio/video buffer data from buffer_sink and put it in bufref.
 + *
 + * This function works with both audio and video buffer sinks.
 + *
 + * @param buffer_sink pointer to a buffersink or abuffersink context
 + * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
 + * @return >= 0 in case of success, a negative AVERROR code in case of
 + * failure
 + */
 +int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
 +                                 AVFilterBufferRef **bufref, int flags);
 +
 +
 +/**
 + * Get the number of immediately available frames.
 + */
 +int av_buffersink_poll_frame(AVFilterContext *ctx);
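
A small sketch combining the two flags and the poll helper above; "sink" is an assumed buffersink context:

    AVFilterBufferRef *ref = NULL;

    /* only look at frames that are already buffered, without triggering
     * a request upstream */
    if (av_buffersink_poll_frame(sink) > 0 &&
        av_buffersink_get_buffer_ref(sink, &ref, AV_BUFFERSINK_FLAG_PEEK) >= 0) {
        /* ... read-only inspection of ref ... */
        av_buffersink_get_buffer_ref(sink, &ref, AV_BUFFERSINK_FLAG_NO_REQUEST);
        avfilter_unref_buffer(ref);   /* the fetched (non-peeked) ref is owned here */
    }
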
 +
 +#if FF_API_OLD_VSINK_API
 +/**
 + * @deprecated Use av_buffersink_get_buffer_ref() instead.
 + */
 +attribute_deprecated
 +int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *buffer_sink,
 +                                         AVFilterBufferRef **picref, int flags);
 +#endif
 +
 +/**
   * Get a buffer with filtered data from sink and put it in buf.
   *
-  * @param sink pointer to a context of a buffersink AVFilter.
+  * @param sink pointer to a context of a buffersink or abuffersink AVFilter.
   * @param buf pointer to the buffer will be written here if buf is non-NULL. buf
   *            must be freed by the caller using avfilter_unref_buffer().
   *            Buf may also be NULL to query whether a buffer is ready to be
index 0000000,ca9390a..305d1c4
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,402 +1,403 @@@
 - * This file is part of Libav.
+ /*
+  * Copyright (c) 2008 Vitor Sessak
+  *
 - * Libav is free software; you can redistribute it and/or
++ * This file is part of FFmpeg.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * License along with Libav; if not, write to the Free Software
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 -int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ /**
+  * @file
+  * memory buffer source filter
+  */
+ #include "audio.h"
+ #include "avfilter.h"
+ #include "buffersrc.h"
+ #include "formats.h"
+ #include "vsrc_buffer.h"
++#include "avcodec.h"
+ #include "libavutil/audioconvert.h"
+ #include "libavutil/fifo.h"
+ #include "libavutil/imgutils.h"
+ #include "libavutil/opt.h"
+ #include "libavutil/samplefmt.h"
+ typedef struct {
+     const AVClass    *class;
+     AVFifoBuffer     *fifo;
+     AVRational        time_base;     ///< time_base to set in the output link
+     /* video only */
+     int               h, w;
+     enum PixelFormat  pix_fmt;
+     AVRational        pixel_aspect;
+     /* audio only */
+     int sample_rate;
+     enum AVSampleFormat sample_fmt;
+     char               *sample_fmt_str;
+     uint64_t channel_layout;
+     char    *channel_layout_str;
+     int eof;
+ } BufferSourceContext;
+ #define CHECK_VIDEO_PARAM_CHANGE(s, c, width, height, format)\
+     if (c->w != width || c->h != height || c->pix_fmt != format) {\
+         av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
+         return AVERROR(EINVAL);\
+     }
+ #define CHECK_AUDIO_PARAM_CHANGE(s, c, srate, ch_layout, format)\
+     if (c->sample_fmt != format || c->sample_rate != srate ||\
+         c->channel_layout != ch_layout) {\
+         av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
+         return AVERROR(EINVAL);\
+     }
+ #if FF_API_VSRC_BUFFER_ADD_FRAME
 -int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
++static int av_vsrc_buffer_add_frame_alt(AVFilterContext *buffer_filter, AVFrame *frame,
+                              int64_t pts, AVRational pixel_aspect)
+ {
+     int64_t orig_pts = frame->pts;
+     AVRational orig_sar = frame->sample_aspect_ratio;
+     int ret;
+     frame->pts = pts;
+     frame->sample_aspect_ratio = pixel_aspect;
+     if ((ret = av_buffersrc_write_frame(buffer_filter, frame)) < 0)
+         return ret;
+     frame->pts = orig_pts;
+     frame->sample_aspect_ratio = orig_sar;
+     return 0;
+ }
+ #endif
+ int av_buffersrc_write_frame(AVFilterContext *buffer_filter, AVFrame *frame)
+ {
+     BufferSourceContext *c = buffer_filter->priv;
+     AVFilterBufferRef *buf;
+     int ret;
+     if (!frame) {
+         c->eof = 1;
+         return 0;
+     } else if (c->eof)
+         return AVERROR(EINVAL);
+     if (!av_fifo_space(c->fifo) &&
+         (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
+                                          sizeof(buf))) < 0)
+         return ret;
+     switch (buffer_filter->outputs[0]->type) {
+     case AVMEDIA_TYPE_VIDEO:
+         CHECK_VIDEO_PARAM_CHANGE(buffer_filter, c, frame->width, frame->height,
+                                  frame->format);
+         buf = avfilter_get_video_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
+                                         c->w, c->h);
+         av_image_copy(buf->data, buf->linesize, frame->data, frame->linesize,
+                       c->pix_fmt, c->w, c->h);
+         break;
+     case AVMEDIA_TYPE_AUDIO:
+         CHECK_AUDIO_PARAM_CHANGE(buffer_filter, c, frame->sample_rate, frame->channel_layout,
+                                  frame->format);
+         buf = ff_get_audio_buffer(buffer_filter->outputs[0], AV_PERM_WRITE,
+                                   frame->nb_samples);
+         av_samples_copy(buf->extended_data, frame->extended_data,
+                         0, 0, frame->nb_samples,
+                         av_get_channel_layout_nb_channels(frame->channel_layout),
+                         frame->format);
+         break;
+     default:
+         return AVERROR(EINVAL);
+     }
+     avfilter_copy_frame_props(buf, frame);
+     if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
+         avfilter_unref_buffer(buf);
+         return ret;
+     }
+     return 0;
+ }
++static int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf)
+ {
+     BufferSourceContext *c = s->priv;
+     int ret;
+     if (!buf) {
+         c->eof = 1;
+         return 0;
+     } else if (c->eof)
+         return AVERROR(EINVAL);
+     if (!av_fifo_space(c->fifo) &&
+         (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
+                                          sizeof(buf))) < 0)
+         return ret;
+     switch (s->outputs[0]->type) {
+     case AVMEDIA_TYPE_VIDEO:
+         CHECK_VIDEO_PARAM_CHANGE(s, c, buf->video->w, buf->video->h, buf->format);
+         break;
+     case AVMEDIA_TYPE_AUDIO:
+         CHECK_AUDIO_PARAM_CHANGE(s, c, buf->audio->sample_rate, buf->audio->channel_layout,
+                                  buf->format);
+         break;
+     default:
+         return AVERROR(EINVAL);
+     }
+     if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0)
+         return ret;
+     return 0;
+ }
+ static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
+ {
+     BufferSourceContext *c = ctx->priv;
+     char pix_fmt_str[128];
+     int n = 0;
+     if (!args ||
+         (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
+                     &c->time_base.num, &c->time_base.den,
+                     &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
+         av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args);
+         return AVERROR(EINVAL);
+     }
+     if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) {
+         char *tail;
+         c->pix_fmt = strtol(pix_fmt_str, &tail, 10);
+         if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) {
+             av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str);
+             return AVERROR(EINVAL);
+         }
+     }
+     if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
+         return AVERROR(ENOMEM);
+     av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name);
+     return 0;
+ }
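
For reference, the args string parsed by the 7-field sscanf above has the form width:height:pix_fmt:time_base_num:time_base_den:sar_num:sar_den; a plausible example (values assumed):

    const char *buffer_args = "320:240:yuv420p:1:25:1:1";
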
+ #define OFFSET(x) offsetof(BufferSourceContext, x)
+ #define A AV_OPT_FLAG_AUDIO_PARAM
+ static const AVOption audio_options[] = {
+     { "time_base",      NULL, OFFSET(time_base),           AV_OPT_TYPE_RATIONAL, { 0 }, 0, INT_MAX, A },
+     { "sample_rate",    NULL, OFFSET(sample_rate),         AV_OPT_TYPE_INT,      { 0 }, 0, INT_MAX, A },
+     { "sample_fmt",     NULL, OFFSET(sample_fmt_str),      AV_OPT_TYPE_STRING,             .flags = A },
+     { "channel_layout", NULL, OFFSET(channel_layout_str),  AV_OPT_TYPE_STRING,             .flags = A },
+     { NULL },
+ };
+ static const AVClass abuffer_class = {
+     .class_name = "abuffer source",
+     .item_name  = av_default_item_name,
+     .option     = audio_options,
+     .version    = LIBAVUTIL_VERSION_INT,
+ };
+ static av_cold int init_audio(AVFilterContext *ctx, const char *args, void *opaque)
+ {
+     BufferSourceContext *s = ctx->priv;
+     int ret = 0;
+     s->class = &abuffer_class;
+     av_opt_set_defaults(s);
+     if ((ret = av_set_options_string(s, args, "=", ":")) < 0) {
+         av_log(ctx, AV_LOG_ERROR, "Error parsing options string: %s.\n", args);
+         goto fail;
+     }
+     s->sample_fmt = av_get_sample_fmt(s->sample_fmt_str);
+     if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
+         av_log(ctx, AV_LOG_ERROR, "Invalid sample format %s.\n",
+                s->sample_fmt_str);
+         ret = AVERROR(EINVAL);
+         goto fail;
+     }
+     s->channel_layout = av_get_channel_layout(s->channel_layout_str);
+     if (!s->channel_layout) {
+         av_log(ctx, AV_LOG_ERROR, "Invalid channel layout %s.\n",
+                s->channel_layout_str);
+         ret = AVERROR(EINVAL);
+         goto fail;
+     }
+     if (!(s->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) {
+         ret = AVERROR(ENOMEM);
+         goto fail;
+     }
+     if (!s->time_base.num)
+         s->time_base = (AVRational){1, s->sample_rate};
+     av_log(ctx, AV_LOG_VERBOSE, "tb:%d/%d samplefmt:%s samplerate: %d "
+            "ch layout:%s\n", s->time_base.num, s->time_base.den, s->sample_fmt_str,
+            s->sample_rate, s->channel_layout_str);
+ fail:
+     av_opt_free(s);
+     return ret;
+ }
+ static av_cold void uninit(AVFilterContext *ctx)
+ {
+     BufferSourceContext *s = ctx->priv;
+     while (s->fifo && av_fifo_size(s->fifo)) {
+         AVFilterBufferRef *buf;
+         av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
+         avfilter_unref_buffer(buf);
+     }
+     av_fifo_free(s->fifo);
+     s->fifo = NULL;
+ }
+ static int query_formats(AVFilterContext *ctx)
+ {
+     BufferSourceContext *c = ctx->priv;
+     AVFilterChannelLayouts *channel_layouts = NULL;
+     AVFilterFormats *formats = NULL;
+     AVFilterFormats *samplerates = NULL;
+     switch (ctx->outputs[0]->type) {
+     case AVMEDIA_TYPE_VIDEO:
+         avfilter_add_format(&formats, c->pix_fmt);
+         avfilter_set_common_formats(ctx, formats);
+         break;
+     case AVMEDIA_TYPE_AUDIO:
+         avfilter_add_format(&formats,           c->sample_fmt);
+         avfilter_set_common_formats(ctx, formats);
+         avfilter_add_format(&samplerates,       c->sample_rate);
+         ff_set_common_samplerates(ctx, samplerates);
+         ff_add_channel_layout(&channel_layouts, c->channel_layout);
+         ff_set_common_channel_layouts(ctx, channel_layouts);
+         break;
+     default:
+         return AVERROR(EINVAL);
+     }
+     return 0;
+ }
+ static int config_props(AVFilterLink *link)
+ {
+     BufferSourceContext *c = link->src->priv;
+     switch (link->type) {
+     case AVMEDIA_TYPE_VIDEO:
+         link->w = c->w;
+         link->h = c->h;
+         link->sample_aspect_ratio = c->pixel_aspect;
+         break;
+     case AVMEDIA_TYPE_AUDIO:
+         link->channel_layout = c->channel_layout;
+         link->sample_rate    = c->sample_rate;
+         break;
+     default:
+         return AVERROR(EINVAL);
+     }
+     link->time_base = c->time_base;
+     return 0;
+ }
+ static int request_frame(AVFilterLink *link)
+ {
+     BufferSourceContext *c = link->src->priv;
+     AVFilterBufferRef *buf;
+     if (!av_fifo_size(c->fifo)) {
+         if (c->eof)
+             return AVERROR_EOF;
+         return AVERROR(EAGAIN);
+     }
+     av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);
+     switch (link->type) {
+     case AVMEDIA_TYPE_VIDEO:
+         avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
+         avfilter_draw_slice(link, 0, link->h, 1);
+         avfilter_end_frame(link);
+         break;
+     case AVMEDIA_TYPE_AUDIO:
+         ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
+         break;
+     default:
+         return AVERROR(EINVAL);
+     }
+     avfilter_unref_buffer(buf);
+     return 0;
+ }
+ static int poll_frame(AVFilterLink *link)
+ {
+     BufferSourceContext *c = link->src->priv;
+     int size = av_fifo_size(c->fifo);
+     if (!size && c->eof)
+         return AVERROR_EOF;
+     return size/sizeof(AVFilterBufferRef*);
+ }
+ AVFilter avfilter_vsrc_buffer = {
+     .name      = "buffer",
+     .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
+     .priv_size = sizeof(BufferSourceContext),
+     .query_formats = query_formats,
+     .init      = init_video,
+     .uninit    = uninit,
+     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
+     .outputs   = (AVFilterPad[]) {{ .name            = "default",
+                                     .type            = AVMEDIA_TYPE_VIDEO,
+                                     .request_frame   = request_frame,
+                                     .poll_frame      = poll_frame,
+                                     .config_props    = config_props, },
+                                   { .name = NULL}},
+ };
+ AVFilter avfilter_asrc_abuffer = {
+     .name          = "abuffer",
+     .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
+     .priv_size     = sizeof(BufferSourceContext),
+     .query_formats = query_formats,
+     .init      = init_audio,
+     .uninit    = uninit,
+     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
+     .outputs   = (AVFilterPad[]) {{ .name            = "default",
+                                     .type            = AVMEDIA_TYPE_AUDIO,
+                                     .request_frame   = request_frame,
+                                     .poll_frame      = poll_frame,
+                                     .config_props    = config_props, },
+                                   { .name = NULL}},
+ };
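
The audio source is configured through the audio_options[] table parsed in init_audio(); a plausible args string (option values assumed, the channel layout may also be given numerically):

    const char *abuffer_args =
        "time_base=1/44100:sample_rate=44100:sample_fmt=s16:channel_layout=stereo";
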
  
  #include "avfilter.h"
  
 +enum {
 +
 +    /**
 +     * Do not check for format changes.
 +     */
 +    AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
 +
 +    /**
 +     * Do not copy buffer data.
 +     */
 +    AV_BUFFERSRC_FLAG_NO_COPY = 2,
 +
 +};
 +
 +/**
 + * Add buffer data in picref to buffer_src.
 + *
 + * @param buffer_src  pointer to a buffer source context
 + * @param picref      a buffer reference, or NULL to mark EOF
 + * @param flags       a combination of AV_BUFFERSRC_FLAG_*
 + * @return            >= 0 in case of success, a negative AVERROR code
 + *                    in case of failure
 + */
 +int av_buffersrc_add_ref(AVFilterContext *buffer_src,
 +                         AVFilterBufferRef *picref, int flags);
 +
  /**
 - * Add a buffer to the filtergraph s.
 + * Get the number of failed requests.
   *
 - * @param buf buffer containing frame data to be passed down the filtergraph.
 - * This function will take ownership of buf, the user must not free it.
 - * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
 + * A failed request occurs when the request_frame method is called while no
 + * frame is present in the buffer.
 + * The counter is reset when a frame is added.
   */
 -int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
 +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
  
+ /**
+  * Add a frame to the buffer source.
+  *
+  * @param s an instance of the buffersrc filter.
+  * @param frame frame to be added.
+  *
+  * @warning frame data will be memcpy()ed, which may be a big performance
+  *          hit. Use av_buffersrc_buffer() to avoid copying the data.
+  */
+ int av_buffersrc_write_frame(AVFilterContext *s, AVFrame *frame);
  #endif /* AVFILTER_BUFFERSRC_H */
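
A hedged sketch of the intended calling pattern for this header, assuming "src" is an already created buffer/abuffer source context and "frame" a decoded AVFrame:

    /* frame data is copied into the filtergraph */
    if (av_buffersrc_write_frame(src, frame) < 0) {
        /* frame properties changed on the fly, or allocation failed */
    }

    /* at end of stream, a NULL frame marks EOF for the source */
    av_buffersrc_write_frame(src, NULL);
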
  
  #include "avfilter.h"
  #include "internal.h"
- void ff_avfilter_default_free_buffer(AVFilterBuffer *ptr)
- {
-     if (ptr->extended_data != ptr->data)
-         av_freep(&ptr->extended_data);
-     av_free(ptr->data[0]);
-     av_free(ptr);
- }
- /* TODO: set the buffer's priv member to a context structure for the whole
-  * filter chain.  This will allow for a buffer pool instead of the constant
-  * alloc & free cycle currently implemented. */
- AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
- {
-     int linesize[4];
-     uint8_t *data[4];
-     int i;
-     AVFilterBufferRef *picref = NULL;
-     AVFilterPool *pool = link->pool;
-     if (pool) {
-         for (i = 0; i < POOL_SIZE; i++) {
-             picref = pool->pic[i];
-             if (picref && picref->buf->format == link->format && picref->buf->w == w && picref->buf->h == h) {
-                 AVFilterBuffer *pic = picref->buf;
-                 pool->pic[i] = NULL;
-                 pool->count--;
-                 picref->video->w = w;
-                 picref->video->h = h;
-                 picref->perms = perms | AV_PERM_READ;
-                 picref->format = link->format;
-                 pic->refcount = 1;
-                 memcpy(picref->data,     pic->data,     sizeof(picref->data));
-                 memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
-                 pool->refcount++;
-                 return picref;
-             }
-         }
-     } else {
-         pool = link->pool = av_mallocz(sizeof(AVFilterPool));
-         pool->refcount = 1;
-     }
-     // align: +2 is needed for swscaler, +16 to be SIMD-friendly
-     if ((i = av_image_alloc(data, linesize, w, h, link->format, 32)) < 0)
-         return NULL;
-     picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize,
-                                                        perms, w, h, link->format);
-     if (!picref) {
-         av_free(data[0]);
-         return NULL;
-     }
-     memset(data[0], 128, i);
-     picref->buf->priv = pool;
-     picref->buf->free = NULL;
-     pool->refcount++;
-     return picref;
- }
- void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
- {
-     AVFilterLink *outlink = NULL;
-     if (inlink->dst->output_count)
-         outlink = inlink->dst->outputs[0];
-     if (outlink) {
-         outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
-         avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
-         avfilter_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
-     }
- }
- void avfilter_default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
- {
-     AVFilterLink *outlink = NULL;
-     if (inlink->dst->output_count)
-         outlink = inlink->dst->outputs[0];
-     if (outlink)
-         avfilter_draw_slice(outlink, y, h, slice_dir);
- }
- void avfilter_default_end_frame(AVFilterLink *inlink)
- {
-     AVFilterLink *outlink = NULL;
-     if (inlink->dst->output_count)
-         outlink = inlink->dst->outputs[0];
-     avfilter_unref_buffer(inlink->cur_buf);
-     inlink->cur_buf = NULL;
-     if (outlink) {
-         if (outlink->out_buf) {
-             avfilter_unref_buffer(outlink->out_buf);
-             outlink->out_buf = NULL;
-         }
-         avfilter_end_frame(outlink);
-     }
- }
+ #include "formats.h"
  
 -/**
 - * default config_link() implementation for output video links to simplify
 - * the implementation of one input one output video filters */
 -int avfilter_default_config_output_link(AVFilterLink *link)
 +static void set_common_formats(AVFilterContext *ctx, AVFilterFormats *fmts,
 +                               enum AVMediaType type, int offin, int offout)
  {
 -    if (link->src->input_count && link->src->inputs[0]) {
 -        if (link->type == AVMEDIA_TYPE_VIDEO) {
 -            link->w = link->src->inputs[0]->w;
 -            link->h = link->src->inputs[0]->h;
 -            link->time_base = link->src->inputs[0]->time_base;
 -        }
 -    } else {
 -        /* XXX: any non-simple filter which would cause this branch to be taken
 -         * really should implement its own config_props() for this link. */
 -        return -1;
 +    int i;
 +    for (i = 0; i < ctx->input_count; i++)
 +        if (ctx->inputs[i] && ctx->inputs[i]->type == type)
 +            avfilter_formats_ref(fmts,
 +                                 (AVFilterFormats **)((uint8_t *)ctx->inputs[i]+offout));
 +
 +    for (i = 0; i < ctx->output_count; i++)
 +        if (ctx->outputs[i] && ctx->outputs[i]->type == type)
 +            avfilter_formats_ref(fmts,
 +                                 (AVFilterFormats **)((uint8_t *)ctx->outputs[i]+offin));
 +
 +    if (!fmts->refcount) {
 +        av_free(fmts->formats);
 +        av_free(fmts->refs);
 +        av_free(fmts);
      }
 +}
  
 -    return 0;
 +void avfilter_set_common_pixel_formats(AVFilterContext *ctx, AVFilterFormats *formats)
 +{
 +    set_common_formats(ctx, formats, AVMEDIA_TYPE_VIDEO,
 +                       offsetof(AVFilterLink, in_formats),
 +                       offsetof(AVFilterLink, out_formats));
 +}
 +
 +void avfilter_set_common_sample_formats(AVFilterContext *ctx, AVFilterFormats *formats)
 +{
 +    set_common_formats(ctx, formats, AVMEDIA_TYPE_AUDIO,
 +                       offsetof(AVFilterLink, in_formats),
 +                       offsetof(AVFilterLink, out_formats));
 +}
 +
 +void avfilter_set_common_channel_layouts(AVFilterContext *ctx, AVFilterFormats *formats)
 +{
 +    set_common_formats(ctx, formats, AVMEDIA_TYPE_AUDIO,
-                        offsetof(AVFilterLink, in_chlayouts),
-                        offsetof(AVFilterLink, out_chlayouts));
++                       offsetof(AVFilterLink, in_channel_layouts),
++                       offsetof(AVFilterLink, out_channel_layouts));
 +}
 +
 +void avfilter_set_common_packing_formats(AVFilterContext *ctx, AVFilterFormats *formats)
 +{
 +    set_common_formats(ctx, formats, AVMEDIA_TYPE_AUDIO,
 +                       offsetof(AVFilterLink, in_packing),
 +                       offsetof(AVFilterLink, out_packing));
  }
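
A sketch of how a simple video filter's query_formats() callback would use the setters above; the filter and its format list are assumed for illustration:

    static int sketch_query_formats(AVFilterContext *ctx)
    {
        static const int pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
        avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
        return 0;
    }
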
- int avfilter_default_query_formats(AVFilterContext *ctx)
- {
-     avfilter_set_common_pixel_formats(ctx, avfilter_make_all_formats(AVMEDIA_TYPE_VIDEO));
-     avfilter_set_common_sample_formats(ctx, avfilter_make_all_formats(AVMEDIA_TYPE_AUDIO));
-     avfilter_set_common_channel_layouts(ctx, avfilter_make_all_channel_layouts());
-     avfilter_set_common_packing_formats(ctx, avfilter_make_all_packing_formats());
-     return 0;
- }
- void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
- {
-     avfilter_start_frame(link->dst->outputs[0], picref);
- }
- void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
- {
-     avfilter_draw_slice(link->dst->outputs[0], y, h, slice_dir);
- }
- void avfilter_null_end_frame(AVFilterLink *link)
- {
-     avfilter_end_frame(link->dst->outputs[0]);
- }
- AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
- {
-     return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
- }
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
 +#include "libavutil/eval.h"
  #include "libavutil/pixdesc.h"
 +#include "libavutil/audioconvert.h"
  #include "avfilter.h"
  #include "internal.h"
+ #include "formats.h"
  
  /**
   * Add all refs from a to ret and destroy a.
   */
- static void merge_ref(AVFilterFormats *ret, AVFilterFormats *a)
- {
-     int i;
-     for (i = 0; i < a->refcount; i++) {
-         ret->refs[ret->refcount] = a->refs[i];
-         *ret->refs[ret->refcount++] = ret;
-     }
+ #define MERGE_REF(ret, a, fmts, type, fail)                                \
+ do {                                                                       \
+     type ***tmp;                                                           \
+     int i;                                                                 \
+                                                                            \
+     if (!(tmp = av_realloc(ret->refs,                                      \
+                            sizeof(*tmp) * (ret->refcount + a->refcount)))) \
+         goto fail;                                                         \
+     ret->refs = tmp;                                                       \
+                                                                            \
+     for (i = 0; i < a->refcount; i ++) {                                   \
+         ret->refs[ret->refcount] = a->refs[i];                             \
+         *ret->refs[ret->refcount++] = ret;                                 \
+     }                                                                      \
+                                                                            \
+     av_freep(&a->refs);                                                    \
+     av_freep(&a->fmts);                                                    \
+     av_freep(&a);                                                          \
+ } while (0)
  
-     av_free(a->refs);
-     av_free(a->formats);
-     av_free(a);
- }
+ /**
+  * Add all formats common for a and b to ret, copy the refs and destroy
+  * a and b.
+  */
+ #define MERGE_FORMATS(ret, a, b, fmts, nb, type, fail)                          \
+ do {                                                                            \
+     int i, j, k = 0, count = FFMIN(a->nb, b->nb);                               \
+                                                                                 \
+     if (!(ret = av_mallocz(sizeof(*ret))))                                      \
+         goto fail;                                                              \
+                                                                                 \
+     if (count) {                                                                \
+         if (!(ret->fmts = av_malloc(sizeof(*ret->fmts) * count)))               \
+             goto fail;                                                          \
+         for (i = 0; i < a->nb; i++)                                             \
+             for (j = 0; j < b->nb; j++)                                         \
 -                if (a->fmts[i] == b->fmts[j])                                   \
++                if (a->fmts[i] == b->fmts[j]) {                                 \
++                    if(k >= FFMIN(a->nb, b->nb)){                               \
++                        av_log(0, AV_LOG_ERROR, "Duplicate formats in avfilter_merge_formats() detected\n"); \
++                        av_free(ret->fmts);                                     \
++                        av_free(ret);                                           \
++                        return NULL;                                            \
++                    }                                                           \
+                     ret->fmts[k++] = a->fmts[i];                                \
 -                                                                                \
 -        ret->nb = k;                                                            \
++                }                                                               \
+     }                                                                           \
++    ret->nb = k;                                                                \
+     /* check that there was at least one common format */                       \
+     if (!ret->nb)                                                               \
+         goto fail;                                                              \
+                                                                                 \
+     MERGE_REF(ret, a, fmts, type, fail);                                        \
+     MERGE_REF(ret, b, fmts, type, fail);                                        \
+ } while (0)
  
  AVFilterFormats *avfilter_merge_formats(AVFilterFormats *a, AVFilterFormats *b)
  {
@@@ -94,93 -161,52 +169,103 @@@ int ff_fmt_is_in(int fmt, const int *fm
      return 0;
  }
  
 +#define COPY_INT_LIST(list_copy, list, type) {                          \
 +    int count = 0;                                                      \
 +    if (list)                                                           \
 +        for (count = 0; list[count] != -1; count++)                     \
 +            ;                                                           \
 +    list_copy = av_calloc(count+1, sizeof(type));                       \
 +    if (list_copy) {                                                    \
 +        memcpy(list_copy, list, sizeof(type) * count);                  \
 +        list_copy[count] = -1;                                          \
 +    }                                                                   \
 +}
 +
 +int *ff_copy_int_list(const int * const list)
 +{
 +    int *ret = NULL;
 +    COPY_INT_LIST(ret, list, int);
 +    return ret;
 +}
 +
 +int64_t *ff_copy_int64_list(const int64_t * const list)
 +{
 +    int64_t *ret = NULL;
 +    COPY_INT_LIST(ret, list, int64_t);
 +    return ret;
 +}
 +
 +#define MAKE_FORMAT_LIST()                                              \
 +    AVFilterFormats *formats;                                           \
 +    int count = 0;                                                      \
 +    if (fmts)                                                           \
 +        for (count = 0; fmts[count] != -1; count++)                     \
 +            ;                                                           \
 +    formats = av_mallocz(sizeof(AVFilterFormats));                      \
 +    if (!formats) return NULL;                                          \
 +    formats->format_count = count;                                      \
 +    if (count) {                                                        \
 +        formats->formats = av_malloc(sizeof(*formats->formats)*count);  \
 +        if (!formats->formats) {                                        \
 +            av_free(formats);                                           \
 +            return NULL;                                                \
 +        }                                                               \
 +    }
 +
  AVFilterFormats *avfilter_make_format_list(const int *fmts)
  {
 -    AVFilterFormats *formats;
 -    int count;
 +    MAKE_FORMAT_LIST();
 +    while (count--)
 +        formats->formats[count] = fmts[count];
  
 -    for (count = 0; fmts[count] != -1; count++)
 -        ;
 +    return formats;
 +}
  
- AVFilterFormats *avfilter_make_format64_list(const int64_t *fmts)
 -    formats               = av_mallocz(sizeof(*formats));
++AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts)
 +{
 +    MAKE_FORMAT_LIST();
      if (count)
 -        formats->formats  = av_malloc(sizeof(*formats->formats) * count);
 -    formats->format_count = count;
 -    memcpy(formats->formats, fmts, sizeof(*formats->formats) * count);
 +        memcpy(formats->formats, fmts, sizeof(*formats->formats) * count);
  
--    return formats;
++    return (AVFilterChannelLayouts*)formats;
  }
  
 -int avfilter_add_format(AVFilterFormats **avff, int fmt)
+ #define ADD_FORMAT(f, fmt, type, list, nb)                  \
+ do {                                                        \
+     type *fmts;                                             \
+                                                             \
+     if (!(*f) && !(*f = av_mallocz(sizeof(**f))))           \
+         return AVERROR(ENOMEM);                             \
+                                                             \
+     fmts = av_realloc((*f)->list,                           \
+                       sizeof(*(*f)->list) * ((*f)->nb + 1));\
+     if (!fmts)                                              \
+         return AVERROR(ENOMEM);                             \
+                                                             \
+     (*f)->list = fmts;                                      \
+     (*f)->list[(*f)->nb++] = fmt;                           \
+     return 0;                                               \
+ } while (0)
 +int avfilter_add_format(AVFilterFormats **avff, int64_t fmt)
  {
-     int64_t *fmts;
-     if (!(*avff) && !(*avff = av_mallocz(sizeof(**avff))))
-         return AVERROR(ENOMEM);
-     fmts = av_realloc((*avff)->formats,
-                       sizeof(*(*avff)->formats) * ((*avff)->format_count+1));
-     if (!fmts)
-         return AVERROR(ENOMEM);
 -    ADD_FORMAT(avff, fmt, int, formats, format_count);
++    ADD_FORMAT(avff, fmt, int64_t, formats, format_count);
+ }
  
-     (*avff)->formats = fmts;
-     (*avff)->formats[(*avff)->format_count++] = fmt;
-     return 0;
+ int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout)
+ {
+     ADD_FORMAT(l, channel_layout, uint64_t, channel_layouts, nb_channel_layouts);
  }
  
 +#if FF_API_OLD_ALL_FORMATS_API
  AVFilterFormats *avfilter_all_formats(enum AVMediaType type)
  {
 +    return avfilter_make_all_formats(type);
 +}
 +#endif
 +
 +AVFilterFormats *avfilter_make_all_formats(enum AVMediaType type)
 +{
      AVFilterFormats *ret = NULL;
      int fmt;
      int num_formats = type == AVMEDIA_TYPE_VIDEO ? PIX_FMT_NB    :
      return ret;
  }
  
- AVFilterFormats *avfilter_make_all_channel_layouts(void)
- {
-     return avfilter_make_format64_list(avfilter_all_channel_layouts);
- }
 +const int64_t avfilter_all_channel_layouts[] = {
 +#include "all_channel_layouts.h"
 +    -1
 +};
 +
- void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
++// AVFilterFormats *avfilter_make_all_channel_layouts(void)
++// {
++//     return avfilter_make_format64_list(avfilter_all_channel_layouts);
++// }
 +
 +AVFilterFormats *avfilter_make_all_packing_formats(void)
 +{
 +    static const int packing[] = {
 +        AVFILTER_PACKED,
 +        AVFILTER_PLANAR,
 +        -1,
 +    };
 +
 +    return avfilter_make_format_list(packing);
 +}
 +
+ AVFilterFormats *ff_all_samplerates(void)
  {
-     *ref = f;
-     f->refs = av_realloc(f->refs, sizeof(*f->refs) * ++f->refcount);
-     f->refs[f->refcount-1] = ref;
+     AVFilterFormats *ret = av_mallocz(sizeof(*ret));
+     return ret;
  }
  
static int find_ref_index(AVFilterFormats **ref)
AVFilterChannelLayouts *ff_all_channel_layouts(void)
  {
-     int i;
-     for (i = 0; i < (*ref)->refcount; i++)
-         if ((*ref)->refs[i] == ref)
-             return i;
-     return -1;
+     AVFilterChannelLayouts *ret = av_mallocz(sizeof(*ret));
+     return ret;
  }
  
- void avfilter_formats_unref(AVFilterFormats **ref)
+ #define FORMATS_REF(f, ref)                                          \
+ do {                                                                 \
+     *ref = f;                                                        \
+     f->refs = av_realloc(f->refs, sizeof(*f->refs) * ++f->refcount); \
+     f->refs[f->refcount-1] = ref;                                    \
+ } while (0)
+ void ff_channel_layouts_ref(AVFilterChannelLayouts *f, AVFilterChannelLayouts **ref)
+ {
+     FORMATS_REF(f, ref);
+ }
+ void avfilter_formats_ref(AVFilterFormats *f, AVFilterFormats **ref)
  {
-     int idx;
+     FORMATS_REF(f, ref);
+ }
  
-     if (!*ref)
-         return;
+ #define FIND_REF_INDEX(ref, idx)            \
+ do {                                        \
+     int i;                                  \
+     for (i = 0; i < (*ref)->refcount; i ++) \
+         if((*ref)->refs[i] == ref) {        \
+             idx = i;                        \
+             break;                          \
+         }                                   \
+ } while (0)
+ #define FORMATS_UNREF(ref, list)                                   \
+ do {                                                               \
+     int idx = -1;                                                  \
+                                                                    \
+     if (!*ref)                                                     \
+         return;                                                    \
+                                                                    \
+     FIND_REF_INDEX(ref, idx);                                      \
+                                                                    \
+     if (idx >= 0)                                                  \
+         memmove((*ref)->refs + idx, (*ref)->refs + idx + 1,        \
+             sizeof(*(*ref)->refs) * ((*ref)->refcount - idx - 1)); \
+                                                                    \
+     if(!--(*ref)->refcount) {                                      \
+         av_free((*ref)->list);                                     \
+         av_free((*ref)->refs);                                     \
+         av_free(*ref);                                             \
+     }                                                              \
+     *ref = NULL;                                                   \
+ } while (0)
  
-     idx = find_ref_index(ref);
+ void avfilter_formats_unref(AVFilterFormats **ref)
+ {
+     FORMATS_UNREF(ref, formats);
+ }
  
-     if(idx >= 0)
-         memmove((*ref)->refs + idx, (*ref)->refs + idx + 1,
-             sizeof(*(*ref)->refs) * ((*ref)->refcount - idx - 1));
+ void ff_channel_layouts_unref(AVFilterChannelLayouts **ref)
+ {
+     FORMATS_UNREF(ref, channel_layouts);
+ }
  
-     if (!--(*ref)->refcount) {
-         av_free((*ref)->formats);
-         av_free((*ref)->refs);
-         av_free(*ref);
-     }
-     *ref = NULL;
+ #define FORMATS_CHANGEREF(oldref, newref)       \
+ do {                                            \
+     int idx = -1;                               \
+                                                 \
+     FIND_REF_INDEX(oldref, idx);                \
+                                                 \
+     if (idx >= 0) {                             \
+         (*oldref)->refs[idx] = newref;          \
+         *newref = *oldref;                      \
+         *oldref = NULL;                         \
+     }                                           \
+ } while (0)
+ void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
+                                   AVFilterChannelLayouts **newref)
+ {
+     FORMATS_CHANGEREF(oldref, newref);
  }
  
  void avfilter_formats_changeref(AVFilterFormats **oldref,
                                  AVFilterFormats **newref)
  {
-     int idx = find_ref_index(oldref);
+     FORMATS_CHANGEREF(oldref, newref);
+ }
+ #define SET_COMMON_FORMATS(ctx, fmts, in_fmts, out_fmts, ref, list) \
+ {                                                                   \
+     int count = 0, i;                                               \
+                                                                     \
+     for (i = 0; i < ctx->input_count; i++) {                        \
+         if (ctx->inputs[i]) {                                       \
+             ref(fmts, &ctx->inputs[i]->out_fmts);                   \
+             count++;                                                \
+         }                                                           \
+     }                                                               \
+     for (i = 0; i < ctx->output_count; i++) {                       \
+         if (ctx->outputs[i]) {                                      \
+             ref(fmts, &ctx->outputs[i]->in_fmts);                   \
+             count++;                                                \
+         }                                                           \
+     }                                                               \
+                                                                     \
+     if (!count) {                                                   \
+         av_freep(&fmts->list);                                      \
+         av_freep(&fmts->refs);                                      \
+         av_freep(&fmts);                                            \
+     }                                                               \
+ }
+ void ff_set_common_channel_layouts(AVFilterContext *ctx,
+                                    AVFilterChannelLayouts *layouts)
+ {
+     SET_COMMON_FORMATS(ctx, layouts, in_channel_layouts, out_channel_layouts,
+                        ff_channel_layouts_ref, channel_layouts);
+ }
+ void ff_set_common_samplerates(AVFilterContext *ctx,
+                                AVFilterFormats *samplerates)
+ {
+     SET_COMMON_FORMATS(ctx, samplerates, in_samplerates, out_samplerates,
+                        avfilter_formats_ref, formats);
+ }
+ /**
+  * A helper for query_formats() which sets all links to the same list of
+  * formats. If there are no links hooked to this filter, the list of formats is
+  * freed.
+  */
+ void avfilter_set_common_formats(AVFilterContext *ctx, AVFilterFormats *formats)
+ {
+     SET_COMMON_FORMATS(ctx, formats, in_formats, out_formats,
+                        avfilter_formats_ref, formats);
+ }
  
-     if (idx >= 0) {
-         (*oldref)->refs[idx] = newref;
-         *newref = *oldref;
-         *oldref = NULL;
+ int avfilter_default_query_formats(AVFilterContext *ctx)
+ {
+     enum AVMediaType type = ctx->inputs  && ctx->inputs [0] ? ctx->inputs [0]->type :
+                             ctx->outputs && ctx->outputs[0] ? ctx->outputs[0]->type :
+                             AVMEDIA_TYPE_VIDEO;
+     avfilter_set_common_formats(ctx, avfilter_all_formats(type));
+     if (type == AVMEDIA_TYPE_AUDIO) {
+         ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
+         ff_set_common_samplerates(ctx, ff_all_samplerates());
      }
+     return 0;
  }
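
For illustration only (not part of the merged diff), a minimal sketch of how a video filter's query_formats() callback would use the helper above; the callback name and pixel format list are hypothetical:

    #include "avfilter.h"

    /* Hypothetical query_formats() callback: advertise two pixel formats on
     * every input and output link of the filter. An audio filter would also
     * call ff_set_common_channel_layouts() and ff_set_common_samplerates(),
     * as avfilter_default_query_formats() does above. */
    static int example_query_formats(AVFilterContext *ctx)
    {
        static const int pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_GRAY8, PIX_FMT_NONE };

        avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
        return 0;
    }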
 +
 +/* internal functions for parsing audio format arguments */
 +
 +int ff_parse_pixel_format(enum PixelFormat *ret, const char *arg, void *log_ctx)
 +{
 +    char *tail;
 +    int pix_fmt = av_get_pix_fmt(arg);
 +    if (pix_fmt == PIX_FMT_NONE) {
 +        pix_fmt = strtol(arg, &tail, 0);
 +        if (*tail || (unsigned)pix_fmt >= PIX_FMT_NB) {
 +            av_log(log_ctx, AV_LOG_ERROR, "Invalid pixel format '%s'\n", arg);
 +            return AVERROR(EINVAL);
 +        }
 +    }
 +    *ret = pix_fmt;
 +    return 0;
 +}
 +
 +int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx)
 +{
 +    char *tail;
 +    int sfmt = av_get_sample_fmt(arg);
 +    if (sfmt == AV_SAMPLE_FMT_NONE) {
 +        sfmt = strtol(arg, &tail, 0);
 +        if (*tail || (unsigned)sfmt >= AV_SAMPLE_FMT_NB) {
 +            av_log(log_ctx, AV_LOG_ERROR, "Invalid sample format '%s'\n", arg);
 +            return AVERROR(EINVAL);
 +        }
 +    }
 +    *ret = sfmt;
 +    return 0;
 +}
 +
 +int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx)
 +{
 +    char *tail;
 +    double srate = av_strtod(arg, &tail);
 +    if (*tail || srate < 1 || (int)srate != srate || srate > INT_MAX) {
 +        av_log(log_ctx, AV_LOG_ERROR, "Invalid sample rate '%s'\n", arg);
 +        return AVERROR(EINVAL);
 +    }
 +    *ret = srate;
 +    return 0;
 +}
 +
 +int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx)
 +{
 +    char *tail;
 +    int64_t chlayout = av_get_channel_layout(arg);
 +    if (chlayout == 0) {
 +        chlayout = strtol(arg, &tail, 10);
 +        if (*tail || chlayout == 0) {
 +            av_log(log_ctx, AV_LOG_ERROR, "Invalid channel layout '%s'\n", arg);
 +            return AVERROR(EINVAL);
 +        }
 +    }
 +    *ret = chlayout;
 +    return 0;
 +}
 +
 +int ff_parse_packing_format(int *ret, const char *arg, void *log_ctx)
 +{
 +    char *tail;
 +    int planar = strtol(arg, &tail, 10);
 +    if (*tail) {
 +        planar = !strcmp(arg, "packed") ? 0:
 +                 !strcmp(arg, "planar") ? 1: -1;
 +    }
 +
 +    if (planar != 0 && planar != 1) {
 +        av_log(log_ctx, AV_LOG_ERROR, "Invalid packing format '%s'\n", arg);
 +        return AVERROR(EINVAL);
 +    }
 +    *ret = planar;
 +    return 0;
 +}
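
As an illustrative sketch (not part of the merged diff), an audio filter's init() might chain these parsers to read an argument string such as "44100:s16:stereo:packed"; the helper name and locals are hypothetical:

    #include "libavutil/avstring.h"
    #include "libavutil/error.h"
    #include "libavutil/mem.h"
    #include "internal.h"

    /* Hypothetical helper: parse "rate:sample_fmt:channel_layout:packing". */
    static int example_parse_audio_args(void *log_ctx, const char *args,
                                        int *rate, int *fmt,
                                        int64_t *layout, int *packing)
    {
        char *s = av_strdup(args), *ptr, *arg;
        int ret = AVERROR(EINVAL);

        if (!s)
            return AVERROR(ENOMEM);
        if (!(arg = av_strtok(s,    ":", &ptr)) ||
            (ret = ff_parse_sample_rate(rate, arg, log_ctx)) < 0)
            goto end;
        ret = AVERROR(EINVAL);
        if (!(arg = av_strtok(NULL, ":", &ptr)) ||
            (ret = ff_parse_sample_format(fmt, arg, log_ctx)) < 0)
            goto end;
        ret = AVERROR(EINVAL);
        if (!(arg = av_strtok(NULL, ":", &ptr)) ||
            (ret = ff_parse_channel_layout(layout, arg, log_ctx)) < 0)
            goto end;
        ret = AVERROR(EINVAL);
        if (!(arg = av_strtok(NULL, ":", &ptr)) ||
            (ret = ff_parse_packing_format(packing, arg, log_ctx)) < 0)
            goto end;
    end:
        av_free(s);
        return ret;
    }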
 +
 +#ifdef TEST
 +
 +#undef printf
 +
 +int main(void)
 +{
 +    const int64_t *cl;
 +    char buf[512];
 +
 +    for (cl = avfilter_all_channel_layouts; *cl != -1; cl++) {
 +        av_get_channel_layout_string(buf, sizeof(buf), -1, *cl);
 +        printf("%s\n", buf);
 +    }
 +
 +    return 0;
 +}
 +
 +#endif
 +
index 0000000,7e0a601..7b0eab5
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,78 +1,81 @@@
 - * This file is part of Libav.
+ /*
 - * Libav is free software; you can redistribute it and/or
++ * This file is part of FFmpeg.

+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * License along with Libav; if not, write to the Free Software
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ #ifndef AVFILTER_FORMATS_H
+ #define AVFILTER_FORMATS_H
+ #include "avfilter.h"
+ typedef struct AVFilterChannelLayouts {
+     uint64_t *channel_layouts;  ///< list of channel layouts
+     int    nb_channel_layouts;  ///< number of channel layouts
+     unsigned refcount;          ///< number of references to this list
+     struct AVFilterChannelLayouts ***refs; ///< references to this list
+ } AVFilterChannelLayouts;
+ /**
+  * Return a channel layouts/samplerates list which contains the intersection of
+  * the layouts/samplerates of a and b. Also, all the references of a, all the
+  * references of b, and a and b themselves will be deallocated.
+  *
+  * If a and b do not share any common elements, neither is modified, and NULL
+  * is returned.
+  */
+ AVFilterChannelLayouts *ff_merge_channel_layouts(AVFilterChannelLayouts *a,
+                                                  AVFilterChannelLayouts *b);
+ AVFilterFormats *ff_merge_samplerates(AVFilterFormats *a,
+                                       AVFilterFormats *b);
+ /**
+  * Construct an empty AVFilterChannelLayouts/AVFilterFormats struct --
+  * representing any channel layout/sample rate.
+  */
+ AVFilterChannelLayouts *ff_all_channel_layouts(void);
+ AVFilterFormats *ff_all_samplerates(void);
++AVFilterChannelLayouts *avfilter_make_format64_list(const int64_t *fmts);
++
++
+ /**
+  * A helper for query_formats() which sets all links to the same list of channel
+  * layouts/sample rates. If there are no links hooked to this filter, the list
+  * is freed.
+  */
+ void ff_set_common_channel_layouts(AVFilterContext *ctx,
+                                    AVFilterChannelLayouts *layouts);
+ void ff_set_common_samplerates(AVFilterContext *ctx,
+                                AVFilterFormats *samplerates);
+ int ff_add_channel_layout(AVFilterChannelLayouts **l, uint64_t channel_layout);
+ /**
+  * Add *ref as a new reference to f.
+  */
+ void ff_channel_layouts_ref(AVFilterChannelLayouts *f,
+                             AVFilterChannelLayouts **ref);
+ /**
+  * Remove a reference to a channel layouts list.
+  */
+ void ff_channel_layouts_unref(AVFilterChannelLayouts **ref);
+ void ff_channel_layouts_changeref(AVFilterChannelLayouts **oldref,
+                                   AVFilterChannelLayouts **newref);
+ #endif // AVFILTER_FORMATS_H
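
To make the merge semantics above concrete, a small sketch (not part of the merged diff; error checking omitted) of two channel layout lists whose only common element is stereo:

    #include "libavutil/audioconvert.h"
    #include "formats.h"

    static AVFilterChannelLayouts *example_merge(void)
    {
        AVFilterChannelLayouts *a = NULL, *b = NULL;

        ff_add_channel_layout(&a, AV_CH_LAYOUT_STEREO);
        ff_add_channel_layout(&a, AV_CH_LAYOUT_5POINT1);
        ff_add_channel_layout(&b, AV_CH_LAYOUT_MONO);
        ff_add_channel_layout(&b, AV_CH_LAYOUT_STEREO);
        /* stereo is present in both lists, so the merge succeeds: a, b and
         * their references are deallocated and the result holds only stereo.
         * With disjoint lists the call would return NULL and leave both
         * lists untouched. */
        return ff_merge_channel_layouts(a, b);
    }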
  
  #include "avfilter.h"
  #include "avfiltergraph.h"
++#include "formats.h"
 +
 +#define POOL_SIZE 32
 +typedef struct AVFilterPool {
 +    AVFilterBufferRef *pic[POOL_SIZE];
 +    int count;
 +    int refcount;
 +    int draining;
 +} AVFilterPool;
 +
 +typedef struct AVFilterCommand {
 +    double time;                ///< time expressed in seconds
 +    char *command;              ///< command
 +    char *arg;                  ///< optional argument for the command
 +    int flags;
 +    struct AVFilterCommand *next;
 +} AVFilterCommand;
  
  /**
   * Check for the validity of graph.
@@@ -76,81 -55,6 +77,85 @@@ void ff_avfilter_default_free_buffer(AV
  /** Tell if a format is contained in the provided list terminated by -1. */
  int ff_fmt_is_in(int fmt, const int *fmts);
  
 +/**
 + * Return a copy of a list of integers terminated by -1, or NULL in
 + * case of copy failure.
 + */
 +int *ff_copy_int_list(const int * const list);
 +
 +/**
 + * Return a copy of a list of 64-bit integers, or NULL in case of
 + * copy failure.
 + */
 +int64_t *ff_copy_int64_list(const int64_t * const list);
 +
 +/* Functions to parse audio format arguments */
 +
 +/**
 + * Parse a pixel format.
 + *
 + * @param ret pixel format pointer to where the value should be written
 + * @param arg string to parse
 + * @param log_ctx log context
 + * @return 0 in case of success, a negative AVERROR code on error
 + */
 +int ff_parse_pixel_format(enum PixelFormat *ret, const char *arg, void *log_ctx);
 +
 +/**
 + * Parse a sample rate.
 + *
 + * @param ret unsigned integer pointer to where the value should be written
 + * @param arg string to parse
 + * @param log_ctx log context
 + * @return 0 in case of success, a negative AVERROR code on error
 + */
 +int ff_parse_sample_rate(int *ret, const char *arg, void *log_ctx);
 +
 +/**
 + * Parse a sample format name or a corresponding integer representation.
 + *
 + * @param ret integer pointer to where the value should be written
 + * @param arg string to parse
 + * @param log_ctx log context
 + * @return 0 in case of success, a negative AVERROR code on error
 + */
 +int ff_parse_sample_format(int *ret, const char *arg, void *log_ctx);
 +
 +/**
 + * Parse a channel layout or a corresponding integer representation.
 + *
 + * @param ret 64bit integer pointer to where the value should be written.
 + * @param arg string to parse
 + * @param log_ctx log context
 + * @return 0 in case of success, a negative AVERROR code on error
 + */
 +int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx);
 +
 +/**
 + * Parse a packing format or a corresponding integer representation.
 + *
 + * @param ret integer pointer to where the value should be written
 + * @param arg string to parse
 + * @param log_ctx log context
 + * @return 0 in case of success, a negative AVERROR code on error
 + */
 +int ff_parse_packing_format(int *ret, const char *arg, void *log_ctx);
 +
 +/**
 + * Pass video frame along and keep an internal reference for later use.
 + */
 +static inline void ff_null_start_frame_keep_ref(AVFilterLink *inlink,
 +                                                AVFilterBufferRef *picref)
 +{
 +    avfilter_start_frame(inlink->dst->outputs[0], avfilter_ref_buffer(picref, ~0));
 +}
 +
 +void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
 +
++void ff_free_pool(AVFilterPool *pool);
++
++void ff_command_queue_pop(AVFilterContext *filter);
++
  #define FF_DPRINTF_START(ctx, func) av_dlog(NULL, "%-16s: ", #func)
  
  void ff_dlog_link(void *ctx, AVFilterLink *link, int end);
index b0cc519,0000000..ab7aa4f
mode 100644,000000..100644
--- /dev/null
@@@ -1,304 -1,0 +1,307 @@@
-     if (!(formats = avfilter_make_format64_list(buf->channel_layouts)))
 +/*
 + * Copyright (c) 2011 Stefano Sabatini
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * buffer video sink
 + */
 +
 +#include "libavutil/fifo.h"
 +#include "avfilter.h"
 +#include "buffersink.h"
 +#include "internal.h"
 +
 +AVBufferSinkParams *av_buffersink_params_alloc(void)
 +{
 +    static const int pixel_fmts[] = { -1 };
 +    AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams));
 +    if (!params)
 +        return NULL;
 +
 +    params->pixel_fmts = pixel_fmts;
 +    return params;
 +}
 +
 +AVABufferSinkParams *av_abuffersink_params_alloc(void)
 +{
 +    static const int sample_fmts[] = { -1 };
 +    static const int packing_fmts[] = { -1 };
 +    static const int64_t channel_layouts[] = { -1 };
 +    AVABufferSinkParams *params = av_malloc(sizeof(AVABufferSinkParams));
 +
 +    if (!params)
 +        return NULL;
 +
 +    params->sample_fmts = sample_fmts;
 +    params->channel_layouts = channel_layouts;
 +    params->packing_fmts = packing_fmts;
 +    return params;
 +}
 +
 +typedef struct {
 +    AVFifoBuffer *fifo;                      ///< FIFO buffer of video frame references
 +
 +    /* only used for video */
 +    enum PixelFormat *pixel_fmts;           ///< list of accepted pixel formats, must be terminated with -1
 +
 +    /* only used for audio */
 +    enum AVSampleFormat *sample_fmts;       ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
 +    int64_t *channel_layouts;               ///< list of accepted channel layouts, terminated by -1
 +    int *packing_fmts;                      ///< list of accepted packing formats, terminated by -1
 +} BufferSinkContext;
 +
 +#define FIFO_INIT_SIZE 8
 +
 +static av_cold int common_init(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +
 +    buf->fifo = av_fifo_alloc(FIFO_INIT_SIZE*sizeof(AVFilterBufferRef *));
 +    if (!buf->fifo) {
 +        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo\n");
 +        return AVERROR(ENOMEM);
 +    }
 +    return 0;
 +}
 +
 +static av_cold void common_uninit(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterBufferRef *picref;
 +
 +    if (buf->fifo) {
 +        while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) {
 +            av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL);
 +            avfilter_unref_buffer(picref);
 +        }
 +        av_fifo_free(buf->fifo);
 +        buf->fifo = NULL;
 +    }
 +}
 +
 +static void end_frame(AVFilterLink *inlink)
 +{
 +    AVFilterContext *ctx = inlink->dst;
 +    BufferSinkContext *buf = inlink->dst->priv;
 +
 +    if (av_fifo_space(buf->fifo) < sizeof(AVFilterBufferRef *)) {
 +        /* realloc fifo size */
 +        if (av_fifo_realloc2(buf->fifo, av_fifo_size(buf->fifo) * 2) < 0) {
 +            av_log(ctx, AV_LOG_ERROR,
 +                   "Cannot buffer more frames. Consume some available frames "
 +                   "before adding new ones.\n");
 +            return;
 +        }
 +    }
 +
 +    /* cache frame */
 +    av_fifo_generic_write(buf->fifo,
 +                          &inlink->cur_buf, sizeof(AVFilterBufferRef *), NULL);
 +}
 +
 +int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
 +                                  AVFilterBufferRef **bufref, int flags)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterLink *inlink = ctx->inputs[0];
 +    int ret;
 +    *bufref = NULL;
 +
 +    /* no picref available, fetch it from the filterchain */
 +    if (!av_fifo_size(buf->fifo)) {
 +        if (flags & AV_BUFFERSINK_FLAG_NO_REQUEST)
 +            return AVERROR(EAGAIN);
 +        if ((ret = avfilter_request_frame(inlink)) < 0)
 +            return ret;
 +    }
 +
 +    if (!av_fifo_size(buf->fifo))
 +        return AVERROR(EINVAL);
 +
 +    if (flags & AV_BUFFERSINK_FLAG_PEEK)
 +        *bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
 +    else
 +        av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);
 +
 +    return 0;
 +}
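
For context, a minimal caller-side sketch (not part of the merged diff) of draining frames from a buffersink instance; sink_ctx is assumed to be an already-configured buffersink filter:

    /* Pull and release frames until the filterchain has nothing left
     * (av_buffersink_get_buffer_ref() returns a negative error code such as
     * AVERROR(EAGAIN) or AVERROR_EOF when no frame is available). */
    static void example_drain_sink(AVFilterContext *sink_ctx)
    {
        AVFilterBufferRef *ref;

        while (av_buffersink_get_buffer_ref(sink_ctx, &ref, 0) >= 0) {
            /* ... consume ref->data / ref->linesize here ... */
            avfilter_unref_buffer(ref);
        }
    }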
 +
 +int av_buffersink_poll_frame(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterLink *inlink = ctx->inputs[0];
 +
 +    return av_fifo_size(buf->fifo)/sizeof(AVFilterBufferRef *) + avfilter_poll_frame(inlink);
 +}
 +
 +#if FF_API_OLD_VSINK_API
 +int av_vsink_buffer_get_video_buffer_ref(AVFilterContext *ctx,
 +                                         AVFilterBufferRef **picref, int flags)
 +{
 +    return av_buffersink_get_buffer_ref(ctx, picref, flags);
 +}
 +#endif
 +
 +#if CONFIG_BUFFERSINK_FILTER
 +
 +static av_cold int vsink_init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    av_unused AVBufferSinkParams *params;
 +
 +    if (!opaque) {
 +        av_log(ctx, AV_LOG_WARNING,
 +               "No opaque field provided\n");
 +        buf->pixel_fmts = NULL;
 +    } else {
 +#if FF_API_OLD_VSINK_API
 +        const int *pixel_fmts = (const enum PixelFormat *)opaque;
 +#else
 +        params = (AVBufferSinkParams *)opaque;
 +        const int *pixel_fmts = params->pixel_fmts;
 +#endif
 +        buf->pixel_fmts = ff_copy_int_list(pixel_fmts);
 +        if (!buf->pixel_fmts)
 +            return AVERROR(ENOMEM);
 +    }
 +
 +    return common_init(ctx);
 +}
 +
 +static av_cold void vsink_uninit(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    av_freep(&buf->pixel_fmts);
 +    return common_uninit(ctx);
 +}
 +
 +static int vsink_query_formats(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +
 +    if (buf->pixel_fmts)
 +        avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(buf->pixel_fmts));
 +    else
 +        avfilter_default_query_formats(ctx);
 +
 +    return 0;
 +}
 +
 +AVFilter avfilter_vsink_buffersink = {
 +    .name      = "buffersink",
 +    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
 +    .priv_size = sizeof(BufferSinkContext),
 +    .init      = vsink_init,
 +    .uninit    = vsink_uninit,
 +
 +    .query_formats = vsink_query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name    = "default",
 +                                    .type          = AVMEDIA_TYPE_VIDEO,
 +                                    .end_frame     = end_frame,
 +                                    .min_perms     = AV_PERM_READ, },
 +                                  { .name = NULL }},
 +    .outputs   = (const AVFilterPad[]) {{ .name = NULL }},
 +};
 +
 +#endif /* CONFIG_BUFFERSINK_FILTER */
 +
 +#if CONFIG_ABUFFERSINK_FILTER
 +
 +static void filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
 +{
 +    end_frame(link);
 +}
 +
 +static av_cold int asink_init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVABufferSinkParams *params;
 +
 +    if (!opaque) {
 +        av_log(ctx, AV_LOG_ERROR,
 +               "No opaque field provided, an AVABufferSinkParams struct is required\n");
 +        return AVERROR(EINVAL);
 +    } else
 +        params = (AVABufferSinkParams *)opaque;
 +
 +    buf->sample_fmts     = ff_copy_int_list  (params->sample_fmts);
 +    buf->channel_layouts = ff_copy_int64_list(params->channel_layouts);
 +    buf->packing_fmts    = ff_copy_int_list  (params->packing_fmts);
 +    if (!buf->sample_fmts || !buf->channel_layouts || !buf->packing_fmts) {
 +        av_freep(&buf->sample_fmts);
 +        av_freep(&buf->channel_layouts);
 +        av_freep(&buf->packing_fmts);
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    return common_init(ctx);
 +}
 +
 +static av_cold void asink_uninit(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +
 +    av_freep(&buf->sample_fmts);
 +    av_freep(&buf->channel_layouts);
 +    av_freep(&buf->packing_fmts);
 +    return common_uninit(ctx);
 +}
 +
 +static int asink_query_formats(AVFilterContext *ctx)
 +{
 +    BufferSinkContext *buf = ctx->priv;
 +    AVFilterFormats *formats = NULL;
++    AVFilterChannelLayouts *layouts = NULL;
 +
 +    if (!(formats = avfilter_make_format_list(buf->sample_fmts)))
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_sample_formats(ctx, formats);
 +
-     avfilter_set_common_channel_layouts(ctx, formats);
++    if (!(layouts = avfilter_make_format64_list(buf->channel_layouts)))
 +        return AVERROR(ENOMEM);
++    ff_set_common_channel_layouts(ctx, layouts);
 +
 +    if (!(formats = avfilter_make_format_list(buf->packing_fmts)))
 +        return AVERROR(ENOMEM);
 +    avfilter_set_common_packing_formats(ctx, formats);
 +
++    ff_set_common_samplerates          (ctx, ff_all_samplerates());
++
 +    return 0;
 +}
 +
 +AVFilter avfilter_asink_abuffersink = {
 +    .name      = "abuffersink",
 +    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
 +    .init      = asink_init,
 +    .uninit    = asink_uninit,
 +    .priv_size = sizeof(BufferSinkContext),
 +    .query_formats = asink_query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name     = "default",
 +                                    .type           = AVMEDIA_TYPE_AUDIO,
 +                                    .filter_samples = filter_samples,
 +                                    .min_perms      = AV_PERM_READ, },
 +                                  { .name = NULL }},
 +    .outputs   = (const AVFilterPad[]) {{ .name = NULL }},
 +};
 +
 +#endif /* CONFIG_ABUFFERSINK_FILTER */
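
As an illustrative caller-side sketch (not part of the merged diff), an abuffersink restricted to packed signed 16-bit stereo could be created roughly like this; the function name, and the assumption that graph is an allocated AVFilterGraph with filters registered, are hypothetical:

    #include "libavutil/audioconvert.h"
    #include "libavutil/samplefmt.h"
    #include "avfilter.h"
    #include "avfiltergraph.h"
    #include "buffersink.h"

    static int example_make_abuffersink(AVFilterGraph *graph, AVFilterContext **sink)
    {
        static const int     sample_fmts[]     = { AV_SAMPLE_FMT_S16, -1 };
        static const int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, -1 };
        static const int     packing_fmts[]    = { AVFILTER_PACKED, -1 };
        AVABufferSinkParams *params = av_abuffersink_params_alloc();
        int ret;

        if (!params)
            return AVERROR(ENOMEM);
        params->sample_fmts     = sample_fmts;
        params->channel_layouts = channel_layouts;
        params->packing_fmts    = packing_fmts;
        /* assumes avfilter_register_all() has already been called */
        ret = avfilter_graph_create_filter(sink, avfilter_get_by_name("abuffersink"),
                                           "out", NULL, params, graph);
        av_free(params);
        return ret;
    }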
index f8d572d,0000000..cadc4ee
mode 100644,000000..100644
--- /dev/null
@@@ -1,676 -1,0 +1,674 @@@
-     formats = NULL;
-     avfilter_add_format(&formats, abuffer->channel_layout);
-     avfilter_set_common_channel_layouts(ctx, formats);
 +/*
 + * Copyright (c) 2008 Vitor Sessak
 + * Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
 + * Copyright (c) 2011 Mina Nagy Zaki
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * memory buffer source filter
 + */
 +
 +#include "avfilter.h"
 +#include "internal.h"
 +#include "audio.h"
 +#include "avcodec.h"
 +#include "buffersrc.h"
 +#include "vsrc_buffer.h"
 +#include "asrc_abuffer.h"
 +#include "libavutil/audioconvert.h"
 +#include "libavutil/avstring.h"
 +#include "libavutil/fifo.h"
 +#include "libavutil/imgutils.h"
 +
 +typedef struct {
 +    AVFifoBuffer     *fifo;
 +    AVRational        time_base;     ///< time_base to set in the output link
 +    int eof;
 +    unsigned          nb_failed_requests;
 +
 +    /* Video only */
 +    AVFilterContext  *scale;
 +    int               h, w;
 +    enum PixelFormat  pix_fmt;
 +    AVRational        sample_aspect_ratio;
 +    char              sws_param[256];
 +
 +    /* Audio only */
 +    // Audio format of incoming buffers
 +    int sample_rate;
 +    unsigned int sample_format;
 +    int64_t channel_layout;
 +    int packing_format;
 +
 +    // Normalization filters
 +    AVFilterContext *aconvert;
 +    AVFilterContext *aresample;
 +} BufferSourceContext;
 +
 +#define FIFO_SIZE 8
 +
 +#define CHECK_PARAM_CHANGE(s, c, width, height, format)\
 +    if (c->w != width || c->h != height || c->pix_fmt != format) {\
 +        av_log(s, AV_LOG_ERROR, "Changing frame properties on the fly is not supported.\n");\
 +        return AVERROR(EINVAL);\
 +    }
 +
 +static void buf_free(AVFilterBuffer *ptr)
 +{
 +    av_free(ptr);
 +    return;
 +}
 +
 +static void set_link_source(AVFilterContext *src, AVFilterLink *link)
 +{
 +    link->src       = src;
 +    link->srcpad    = &(src->output_pads[0]);
 +    src->outputs[0] = link;
 +}
 +
 +static int reconfigure_filter(BufferSourceContext *abuffer, AVFilterContext *filt_ctx)
 +{
 +    int ret;
 +    AVFilterLink * const inlink  = filt_ctx->inputs[0];
 +    AVFilterLink * const outlink = filt_ctx->outputs[0];
 +
 +    inlink->format         = abuffer->sample_format;
 +    inlink->channel_layout = abuffer->channel_layout;
 +    inlink->planar         = abuffer->packing_format;
 +    inlink->sample_rate    = abuffer->sample_rate;
 +
 +    filt_ctx->filter->uninit(filt_ctx);
 +    memset(filt_ctx->priv, 0, filt_ctx->filter->priv_size);
 +    if ((ret = filt_ctx->filter->init(filt_ctx, NULL , NULL)) < 0)
 +        return ret;
 +    if ((ret = inlink->srcpad->config_props(inlink)) < 0)
 +        return ret;
 +    return outlink->srcpad->config_props(outlink);
 +}
 +
 +static int insert_filter(BufferSourceContext *abuffer,
 +                         AVFilterLink *link, AVFilterContext **filt_ctx,
 +                         const char *filt_name)
 +{
 +    int ret;
 +
 +    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
 +        return ret;
 +
 +    link->src->outputs[0] = NULL;
 +    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
 +        link->src->outputs[0] = link;
 +        return ret;
 +    }
 +
 +    set_link_source(*filt_ctx, link);
 +
 +    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
 +        avfilter_free(*filt_ctx);
 +        return ret;
 +    }
 +
 +    return 0;
 +}
 +
 +static void remove_filter(AVFilterContext **filt_ctx)
 +{
 +    AVFilterLink *outlink = (*filt_ctx)->outputs[0];
 +    AVFilterContext *src  = (*filt_ctx)->inputs[0]->src;
 +
 +    (*filt_ctx)->outputs[0] = NULL;
 +    avfilter_free(*filt_ctx);
 +    *filt_ctx = NULL;
 +
 +    set_link_source(src, outlink);
 +}
 +
 +static inline void log_input_change(void *ctx, AVFilterLink *link, AVFilterBufferRef *ref)
 +{
 +    char old_layout_str[16], new_layout_str[16];
 +    av_get_channel_layout_string(old_layout_str, sizeof(old_layout_str),
 +                                 -1, link->channel_layout);
 +    av_get_channel_layout_string(new_layout_str, sizeof(new_layout_str),
 +                                 -1, ref->audio->channel_layout);
 +    av_log(ctx, AV_LOG_INFO,
 +           "Audio input format changed: "
 +           "%s:%s:%d -> %s:%s:%d, normalizing\n",
 +           av_get_sample_fmt_name(link->format),
 +           old_layout_str, (int)link->sample_rate,
 +           av_get_sample_fmt_name(ref->format),
 +           new_layout_str, ref->audio->sample_rate);
 +}
 +
 +static int check_format_change_video(AVFilterContext *buffer_filter,
 +                                     AVFilterBufferRef *picref)
 +{
 +    BufferSourceContext *c = buffer_filter->priv;
 +    int ret;
 +
 +    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
 +        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
 +        AVFilterLink *link;
 +        char scale_param[1024];
 +
 +        av_log(buffer_filter, AV_LOG_INFO,
 +               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
 +               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
 +               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);
 +
 +        if (!scale || strcmp(scale->filter->name, "scale")) {
 +            AVFilter *f = avfilter_get_by_name("scale");
 +
 +            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
 +            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
 +                return ret;
 +
 +            c->scale = scale;
 +
 +            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
 +            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
 +                return ret;
 +            }
 +
 +            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
 +                return ret;
 +            }
 +            scale->outputs[0]->time_base = scale->inputs[0]->time_base;
 +
 +            scale->outputs[0]->format= c->pix_fmt;
 +        } else if (!strcmp(scale->filter->name, "scale")) {
 +            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
 +                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
 +            scale->filter->init(scale, scale_param, NULL);
 +        }
 +
 +        c->pix_fmt = scale->inputs[0]->format = picref->format;
 +        c->w       = scale->inputs[0]->w      = picref->video->w;
 +        c->h       = scale->inputs[0]->h      = picref->video->h;
 +
 +        link = scale->outputs[0];
 +        if ((ret =  link->srcpad->config_props(link)) < 0)
 +            return ret;
 +    }
 +    return 0;
 +}
 +
 +static int check_format_change_audio(AVFilterContext *ctx,
 +                                     AVFilterBufferRef *samplesref)
 +{
 +    BufferSourceContext *abuffer = ctx->priv;
 +    AVFilterLink *link;
 +    int ret, logged = 0;
 +
 +    link = ctx->outputs[0];
 +    if (samplesref->audio->sample_rate != link->sample_rate) {
 +
 +        log_input_change(ctx, link, samplesref);
 +        logged = 1;
 +
 +        abuffer->sample_rate = samplesref->audio->sample_rate;
 +
 +        if (!abuffer->aresample) {
 +            ret = insert_filter(abuffer, link, &abuffer->aresample, "aresample");
 +            if (ret < 0) return ret;
 +        } else {
 +            link = abuffer->aresample->outputs[0];
 +            if (samplesref->audio->sample_rate == link->sample_rate)
 +                remove_filter(&abuffer->aresample);
 +            else
 +                if ((ret = reconfigure_filter(abuffer, abuffer->aresample)) < 0)
 +                    return ret;
 +        }
 +    }
 +
 +    link = ctx->outputs[0];
 +    if (samplesref->format                != link->format         ||
 +        samplesref->audio->channel_layout != link->channel_layout ||
 +        samplesref->audio->planar         != link->planar) {
 +
 +        if (!logged) log_input_change(ctx, link, samplesref);
 +
 +        abuffer->sample_format  = samplesref->format;
 +        abuffer->channel_layout = samplesref->audio->channel_layout;
 +        abuffer->packing_format = samplesref->audio->planar;
 +
 +        if (!abuffer->aconvert) {
 +            ret = insert_filter(abuffer, link, &abuffer->aconvert, "aconvert");
 +            if (ret < 0) return ret;
 +        } else {
 +            link = abuffer->aconvert->outputs[0];
 +            if (samplesref->format                == link->format         &&
 +                samplesref->audio->channel_layout == link->channel_layout &&
 +                samplesref->audio->planar         == link->planar
 +               )
 +                remove_filter(&abuffer->aconvert);
 +            else
 +                if ((ret = reconfigure_filter(abuffer, abuffer->aconvert)) < 0)
 +                    return ret;
 +        }
 +    }
 +
 +    return 0;
 +}
 +
 +static int check_format_change(AVFilterContext *buffer_filter,
 +                               AVFilterBufferRef *picref)
 +{
 +    switch (buffer_filter->outputs[0]->type) {
 +    case AVMEDIA_TYPE_VIDEO:
 +        return check_format_change_video(buffer_filter, picref);
 +    case AVMEDIA_TYPE_AUDIO:
 +        return check_format_change_audio(buffer_filter, picref);
 +    default:
 +        return AVERROR(ENOSYS);
 +    }
 +}
 +
 +static AVFilterBufferRef *copy_buffer_ref(AVFilterContext *ctx,
 +                                          AVFilterBufferRef *ref)
 +{
 +    AVFilterLink *outlink = ctx->outputs[0];
 +    AVFilterBufferRef *buf;
 +    int channels, data_size, i;
 +
 +    switch (outlink->type) {
 +
 +    case AVMEDIA_TYPE_VIDEO:
 +        buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
 +                                        ref->video->w, ref->video->h);
 +        av_image_copy(buf->data, buf->linesize,
 +                      (void*)ref->data, ref->linesize,
 +                      ref->format, ref->video->w, ref->video->h);
 +        break;
 +
 +    case AVMEDIA_TYPE_AUDIO:
 +        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
 +                                        ref->audio->nb_samples);
 +        channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
 +        data_size = av_samples_get_buffer_size(NULL, channels,
 +                                               ref->audio->nb_samples,
 +                                               ref->format, 1);
 +        for (i = 0; i < FF_ARRAY_ELEMS(ref->buf->data) && ref->buf->data[i]; i++)
 +            memcpy(buf->buf->data[i], ref->buf->data[i], data_size);
 +        break;
 +
 +    default:
 +        return NULL;
 +    }
 +    avfilter_copy_buffer_ref_props(buf, ref);
 +    return buf;
 +}
 +
 +int av_buffersrc_add_ref(AVFilterContext *buffer_filter,
 +                         AVFilterBufferRef *picref, int flags)
 +{
 +    BufferSourceContext *c = buffer_filter->priv;
 +    AVFilterBufferRef *buf;
 +    int ret;
 +
 +    if (!picref) {
 +        c->eof = 1;
 +        return 0;
 +    } else if (c->eof)
 +        return AVERROR(EINVAL);
 +
 +    if (!av_fifo_space(c->fifo) &&
 +        (ret = av_fifo_realloc2(c->fifo, av_fifo_size(c->fifo) +
 +                                         sizeof(buf))) < 0)
 +        return ret;
 +
 +    if (!(flags & AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT)) {
 +        ret = check_format_change(buffer_filter, picref);
 +        if (ret < 0)
 +            return ret;
 +    }
 +    if (flags & AV_BUFFERSRC_FLAG_NO_COPY)
 +        buf = picref;
 +    else
 +        buf = copy_buffer_ref(buffer_filter, picref);
 +
 +    if ((ret = av_fifo_generic_write(c->fifo, &buf, sizeof(buf), NULL)) < 0) {
 +        if (buf != picref)
 +            avfilter_unref_buffer(buf);
 +        return ret;
 +    }
 +    c->nb_failed_requests = 0;
 +
 +    return 0;
 +}
 +
 +int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
 +                                        AVFilterBufferRef *picref, int flags)
 +{
 +    return av_buffersrc_add_ref(buffer_filter, picref, 0);
 +}
 +
 +#if CONFIG_AVCODEC
 +#include "avcodec.h"
 +
 +int av_buffersrc_add_frame(AVFilterContext *buffer_src,
 +                           const AVFrame *frame, int flags)
 +{
 +    AVFilterBufferRef *picref;
 +    int ret;
 +
 +    if (!frame) /* NULL for EOF */
 +        return av_buffersrc_add_ref(buffer_src, NULL, flags);
 +
 +    switch (buffer_src->outputs[0]->type) {
 +    case AVMEDIA_TYPE_VIDEO:
 +        picref = avfilter_get_video_buffer_ref_from_frame(frame, AV_PERM_WRITE);
 +        break;
 +    case AVMEDIA_TYPE_AUDIO:
 +        picref = avfilter_get_audio_buffer_ref_from_frame(frame, AV_PERM_WRITE);
 +        break;
 +    default:
 +        return AVERROR(ENOSYS);
 +    }
 +    if (!picref)
 +        return AVERROR(ENOMEM);
 +    ret = av_buffersrc_add_ref(buffer_src, picref, flags);
 +    picref->buf->data[0] = NULL;
 +    avfilter_unref_buffer(picref);
 +    return ret;
 +}
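
A minimal caller-side sketch (not part of the merged diff): pushing decoded AVFrames into a buffer source and signalling end of stream with a NULL frame; src_ctx is assumed to be a configured buffer/abuffer filter:

    /* Feed one decoded frame; pass last = 1 after the final frame so the
     * source reports EOF to the rest of the filtergraph. */
    static int example_feed_source(AVFilterContext *src_ctx, const AVFrame *frame, int last)
    {
        int ret = av_buffersrc_add_frame(src_ctx, frame, 0);

        if (ret >= 0 && last)
            ret = av_buffersrc_add_frame(src_ctx, NULL, 0);
        return ret;
    }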
 +
 +int av_vsrc_buffer_add_frame(AVFilterContext *buffer_src,
 +                             const AVFrame *frame, int flags)
 +{
 +    return av_buffersrc_add_frame(buffer_src, frame, 0);
 +}
 +#endif
 +
 +unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
 +{
 +    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
 +}
 +
 +unsigned av_vsrc_buffer_get_nb_failed_requests(AVFilterContext *buffer_src)
 +{
 +    return ((BufferSourceContext *)buffer_src->priv)->nb_failed_requests;
 +}
 +
 +static av_cold int init_video(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    BufferSourceContext *c = ctx->priv;
 +    char pix_fmt_str[128];
 +    int ret, n = 0;
 +    *c->sws_param = 0;
 +
 +    if (!args ||
 +        (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str,
 +                    &c->time_base.num, &c->time_base.den,
 +                    &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) {
 +        av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args);
 +        return AVERROR(EINVAL);
 +    }
 +
 +    if ((ret = ff_parse_pixel_format(&c->pix_fmt, pix_fmt_str, ctx)) < 0)
 +        return ret;
 +
 +    if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
 +        return AVERROR(ENOMEM);
 +
 +    av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n",
 +           c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
 +           c->time_base.num, c->time_base.den,
 +           c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param);
 +    return 0;
 +}
 +
 +static av_cold int init_audio(AVFilterContext *ctx, const char *args0, void *opaque)
 +{
 +    BufferSourceContext *abuffer = ctx->priv;
 +    char *arg = NULL, *ptr, chlayout_str[16];
 +    char *args = av_strdup(args0);
 +    int ret;
 +
 +    arg = av_strtok(args, ":", &ptr);
 +
 +#define ADD_FORMAT(fmt_name)                                            \
 +    if (!arg)                                                           \
 +        goto arg_fail;                                                  \
 +    if ((ret = ff_parse_##fmt_name(&abuffer->fmt_name, arg, ctx)) < 0) { \
 +        av_freep(&args);                                                \
 +        return ret;                                                     \
 +    }                                                                   \
 +    if (*args)                                                          \
 +        arg = av_strtok(NULL, ":", &ptr)
 +
 +    ADD_FORMAT(sample_rate);
 +    ADD_FORMAT(sample_format);
 +    ADD_FORMAT(channel_layout);
 +    ADD_FORMAT(packing_format);
 +
 +    abuffer->fifo = av_fifo_alloc(FIFO_SIZE*sizeof(AVFilterBufferRef*));
 +    if (!abuffer->fifo) {
 +        av_log(ctx, AV_LOG_ERROR, "Failed to allocate fifo, filter init failed.\n");
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str),
 +                                 -1, abuffer->channel_layout);
 +    av_log(ctx, AV_LOG_INFO, "format:%s layout:%s rate:%d\n",
 +           av_get_sample_fmt_name(abuffer->sample_format), chlayout_str,
 +           abuffer->sample_rate);
 +    av_freep(&args);
 +
 +    return 0;
 +
 +arg_fail:
 +    av_log(ctx, AV_LOG_ERROR, "Invalid arguments, must be of the form "
 +                              "sample_rate:sample_fmt:channel_layout:packing\n");
 +    av_freep(&args);
 +    return AVERROR(EINVAL);
 +}
 +
 +static av_cold void uninit(AVFilterContext *ctx)
 +{
 +    BufferSourceContext *s = ctx->priv;
 +    while (s->fifo && av_fifo_size(s->fifo)) {
 +        AVFilterBufferRef *buf;
 +        av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
 +        avfilter_unref_buffer(buf);
 +    }
 +    av_fifo_free(s->fifo);
 +    s->fifo = NULL;
 +    avfilter_free(s->scale);
 +    s->scale = NULL;
 +}
 +
 +static int query_formats_video(AVFilterContext *ctx)
 +{
 +    BufferSourceContext *c = ctx->priv;
 +    enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };
 +
 +    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
 +    return 0;
 +}
 +
 +static int query_formats_audio(AVFilterContext *ctx)
 +{
 +    BufferSourceContext *abuffer = ctx->priv;
 +    AVFilterFormats *formats;
++    AVFilterChannelLayouts *layouts;
 +
 +    formats = NULL;
 +    avfilter_add_format(&formats, abuffer->sample_format);
 +    avfilter_set_common_sample_formats(ctx, formats);
 +
- #if CONFIG_ABUFFER_FILTER
++    layouts = NULL;
++    ff_add_channel_layout(&layouts, abuffer->channel_layout);
++    ff_set_common_channel_layouts(ctx, layouts);
 +
 +    formats = NULL;
 +    avfilter_add_format(&formats, abuffer->packing_format);
 +    avfilter_set_common_packing_formats(ctx, formats);
 +
 +    return 0;
 +}
 +
 +static int config_output_video(AVFilterLink *link)
 +{
 +    BufferSourceContext *c = link->src->priv;
 +
 +    link->w = c->w;
 +    link->h = c->h;
 +    link->sample_aspect_ratio = c->sample_aspect_ratio;
 +    link->time_base = c->time_base;
 +
 +    return 0;
 +}
 +
 +static int config_output_audio(AVFilterLink *outlink)
 +{
 +    BufferSourceContext *abuffer = outlink->src->priv;
 +    outlink->sample_rate = abuffer->sample_rate;
 +    return 0;
 +}
 +
 +static int request_frame(AVFilterLink *link)
 +{
 +    BufferSourceContext *c = link->src->priv;
 +    AVFilterBufferRef *buf;
 +
 +    if (!av_fifo_size(c->fifo)) {
 +        if (c->eof)
 +            return AVERROR_EOF;
 +        c->nb_failed_requests++;
 +        return AVERROR(EAGAIN);
 +    }
 +    av_fifo_generic_read(c->fifo, &buf, sizeof(buf), NULL);
 +
 +    switch (link->type) {
 +    case AVMEDIA_TYPE_VIDEO:
 +        avfilter_start_frame(link, avfilter_ref_buffer(buf, ~0));
 +        avfilter_draw_slice(link, 0, link->h, 1);
 +        avfilter_end_frame(link);
 +        avfilter_unref_buffer(buf);
 +        break;
 +    case AVMEDIA_TYPE_AUDIO:
 +        ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
 +        avfilter_unref_buffer(buf);
 +        break;
 +    default:
 +        return AVERROR(ENOSYS);
 +    }
 +    return 0;
 +}
 +
 +static int poll_frame(AVFilterLink *link)
 +{
 +    BufferSourceContext *c = link->src->priv;
 +    int size = av_fifo_size(c->fifo);
 +    if (!size && c->eof)
 +        return AVERROR_EOF;
 +    return size/sizeof(AVFilterBufferRef*);
 +}
 +
 +int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
 +                                        AVFilterBufferRef *samplesref,
 +                                        int av_unused flags)
 +{
 +    return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
 +}
 +
 +int av_asrc_buffer_add_samples(AVFilterContext *ctx,
 +                               uint8_t *data[8], int linesize[8],
 +                               int nb_samples, int sample_rate,
 +                               int sample_fmt, int64_t channel_layout, int planar,
 +                               int64_t pts, int av_unused flags)
 +{
 +    AVFilterBufferRef *samplesref;
 +
 +    samplesref = avfilter_get_audio_buffer_ref_from_arrays(
 +                     data, linesize[0], AV_PERM_WRITE,
 +                     nb_samples,
 +                     sample_fmt, channel_layout);
 +    if (!samplesref)
 +        return AVERROR(ENOMEM);
 +
 +    samplesref->buf->free  = buf_free;
 +    samplesref->pts = pts;
 +    samplesref->audio->sample_rate = sample_rate;
 +
 +    AV_NOWARN_DEPRECATED(
 +    return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
 +    )
 +}
 +
 +int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
 +                              uint8_t *buf, int buf_size, int sample_rate,
 +                              int sample_fmt, int64_t channel_layout, int planar,
 +                              int64_t pts, int av_unused flags)
 +{
 +    uint8_t *data[8] = {0};
 +    int linesize[8];
 +    int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
 +        nb_samples  = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);
 +
 +    av_samples_fill_arrays(data, linesize,
 +                           buf, nb_channels, nb_samples,
 +                           sample_fmt, 16);
 +
 +    AV_NOWARN_DEPRECATED(
 +    return av_asrc_buffer_add_samples(ctx,
 +                                      data, linesize, nb_samples,
 +                                      sample_rate,
 +                                      sample_fmt, channel_layout, planar,
 +                                      pts, flags);
 +    )
 +}
 +
 +AVFilter avfilter_vsrc_buffer = {
 +    .name      = "buffer",
 +    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
 +    .priv_size = sizeof(BufferSourceContext),
 +    .query_formats = query_formats_video,
 +
 +    .init      = init_video,
 +    .uninit    = uninit,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name = NULL }},
 +    .outputs   = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .type            = AVMEDIA_TYPE_VIDEO,
 +                                    .request_frame   = request_frame,
 +                                    .poll_frame      = poll_frame,
 +                                    .config_props    = config_output_video, },
 +                                  { .name = NULL}},
 +};
 +
- #endif
 +AVFilter avfilter_asrc_abuffer = {
 +    .name        = "abuffer",
 +    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
 +    .priv_size   = sizeof(BufferSourceContext),
 +    .query_formats = query_formats_audio,
 +
 +    .init        = init_audio,
 +    .uninit      = uninit,
 +
 +    .inputs      = (const AVFilterPad[]) {{ .name = NULL }},
 +    .outputs     = (const AVFilterPad[]) {{ .name      = "default",
 +                                      .type            = AVMEDIA_TYPE_AUDIO,
 +                                      .request_frame   = request_frame,
 +                                      .poll_frame      = poll_frame,
 +                                      .config_props    = config_output_audio, },
 +                                    { .name = NULL}},
 +};
 +
  #include "libavutil/opt.h"
  #include "libavutil/imgutils.h"
  #include "libavformat/avformat.h"
 +#include "audio.h"
 +#include "avcodec.h"
  #include "avfilter.h"
++#include "formats.h"
  
  typedef struct {
 +    /* common A/V fields */
      const AVClass *class;
      int64_t seek_point;   ///< seekpoint in microseconds
      double seek_point_d;
@@@ -335,156 -299,14 +336,158 @@@ AVFilter avfilter_vsrc_movie = 
      .name          = "movie",
      .description   = NULL_IF_CONFIG_SMALL("Read from a movie source."),
      .priv_size     = sizeof(MovieContext),
 -    .init          = init,
 -    .uninit        = uninit,
 -    .query_formats = query_formats,
 +    .init          = movie_init,
 +    .uninit        = movie_common_uninit,
 +    .query_formats = movie_query_formats,
  
 -    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
 -    .outputs   = (AVFilterPad[]) {{ .name            = "default",
 +    .inputs    = (const AVFilterPad[]) {{ .name = NULL }},
 +    .outputs   = (const AVFilterPad[]) {{ .name      = "default",
                                      .type            = AVMEDIA_TYPE_VIDEO,
 -                                    .request_frame   = request_frame,
 -                                    .config_props    = config_output_props, },
 +                                    .request_frame   = movie_request_frame,
 +                                    .config_props    = movie_config_output_props, },
 +                                  { .name = NULL}},
 +};
 +
 +#endif  /* CONFIG_MOVIE_FILTER */
 +
 +#if CONFIG_AMOVIE_FILTER
 +
 +static av_cold int amovie_init(AVFilterContext *ctx, const char *args, void *opaque)
 +{
 +    MovieContext *movie = ctx->priv;
 +    int ret;
 +
 +    if ((ret = movie_common_init(ctx, args, opaque, AVMEDIA_TYPE_AUDIO)) < 0)
 +        return ret;
 +
 +    movie->bps = av_get_bytes_per_sample(movie->codec_ctx->sample_fmt);
 +    return 0;
 +}
 +
 +static int amovie_query_formats(AVFilterContext *ctx)
 +{
 +    MovieContext *movie = ctx->priv;
 +    AVCodecContext *c = movie->codec_ctx;
 +
 +    enum AVSampleFormat sample_fmts[] = { c->sample_fmt, -1 };
 +    int packing_fmts[] = { AVFILTER_PACKED, -1 };
++    int sample_rates[] = { c->sample_rate, -1 };
 +    int64_t chlayouts[] = { c->channel_layout ? c->channel_layout :
 +                            av_get_default_channel_layout(c->channels), -1 };
 +
 +    avfilter_set_common_sample_formats (ctx, avfilter_make_format_list(sample_fmts));
 +    avfilter_set_common_packing_formats(ctx, avfilter_make_format_list(packing_fmts));
-     avfilter_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
++    ff_set_common_samplerates          (ctx, avfilter_make_format_list(sample_rates));
++    ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
 +
 +    return 0;
 +}
 +
 +static int amovie_config_output_props(AVFilterLink *outlink)
 +{
 +    MovieContext *movie = outlink->src->priv;
 +    AVCodecContext *c = movie->codec_ctx;
 +
 +    outlink->sample_rate = c->sample_rate;
 +    outlink->time_base = movie->format_ctx->streams[movie->stream_index]->time_base;
 +
 +    return 0;
 +}
 +
 +static int amovie_get_samples(AVFilterLink *outlink)
 +{
 +    MovieContext *movie = outlink->src->priv;
 +    AVPacket pkt;
 +    int ret, got_frame = 0;
 +
 +    if (!movie->pkt.size && movie->is_done == 1)
 +        return AVERROR_EOF;
 +
 +    /* check for another frame, in case the previous one was completely consumed */
 +    if (!movie->pkt.size) {
 +        while ((ret = av_read_frame(movie->format_ctx, &pkt)) >= 0) {
 +            // Is this a packet from the selected stream?
 +            if (pkt.stream_index != movie->stream_index) {
 +                av_free_packet(&pkt);
 +                continue;
 +            } else {
 +                movie->pkt0 = movie->pkt = pkt;
 +                break;
 +            }
 +        }
 +
 +        if (ret == AVERROR_EOF) {
 +            movie->is_done = 1;
 +            return ret;
 +        }
 +    }
 +
 +    /* decode and update the movie pkt */
 +    avcodec_get_frame_defaults(movie->frame);
 +    ret = avcodec_decode_audio4(movie->codec_ctx, movie->frame, &got_frame, &movie->pkt);
 +    if (ret < 0) {
 +        movie->pkt.size = 0;
 +        return ret;
 +    }
 +    movie->pkt.data += ret;
 +    movie->pkt.size -= ret;
 +
 +    /* wrap the decoded data in a samplesref */
 +    if (got_frame) {
 +        int nb_samples = movie->frame->nb_samples;
 +        int data_size =
 +            av_samples_get_buffer_size(NULL, movie->codec_ctx->channels,
 +                                       nb_samples, movie->codec_ctx->sample_fmt, 1);
 +        if (data_size < 0)
 +            return data_size;
 +        movie->samplesref =
 +            ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
 +        memcpy(movie->samplesref->data[0], movie->frame->data[0], data_size);
 +        movie->samplesref->pts = movie->pkt.pts;
 +        movie->samplesref->pos = movie->pkt.pos;
 +        movie->samplesref->audio->sample_rate = movie->codec_ctx->sample_rate;
 +    }
 +
 +    // We got it. Free the packet since we are returning
 +    if (movie->pkt.size <= 0)
 +        av_free_packet(&movie->pkt0);
 +
 +    return 0;
 +}
 +
 +static int amovie_request_frame(AVFilterLink *outlink)
 +{
 +    MovieContext *movie = outlink->src->priv;
 +    int ret;
 +
 +    if (movie->is_done)
 +        return AVERROR_EOF;
 +    do {
 +        if ((ret = amovie_get_samples(outlink)) < 0)
 +            return ret;
 +    } while (!movie->samplesref);
 +
 +    ff_filter_samples(outlink, avfilter_ref_buffer(movie->samplesref, ~0));
 +    avfilter_unref_buffer(movie->samplesref);
 +    movie->samplesref = NULL;
 +
 +    return 0;
 +}
 +
 +AVFilter avfilter_asrc_amovie = {
 +    .name          = "amovie",
 +    .description   = NULL_IF_CONFIG_SMALL("Read audio from a movie source."),
 +    .priv_size     = sizeof(MovieContext),
 +    .init          = amovie_init,
 +    .uninit        = movie_common_uninit,
 +    .query_formats = amovie_query_formats,
 +
 +    .inputs    = (const AVFilterPad[]) {{ .name = NULL }},
 +    .outputs   = (const AVFilterPad[]) {{ .name      = "default",
 +                                    .type            = AVMEDIA_TYPE_AUDIO,
 +                                    .request_frame   = amovie_request_frame,
 +                                    .config_props    = amovie_config_output_props, },
                                    { .name = NULL}},
  };
 +
 +#endif /* CONFIG_AMOVIE_FILTER */
@@@ -29,8 -29,8 +29,8 @@@
  #include "libavutil/avutil.h"
  
  #define LIBAVFILTER_VERSION_MAJOR  2
- #define LIBAVFILTER_VERSION_MINOR 73
 -#define LIBAVFILTER_VERSION_MINOR  17
 -#define LIBAVFILTER_VERSION_MICRO  0
++#define LIBAVFILTER_VERSION_MINOR 74
 +#define LIBAVFILTER_VERSION_MICRO 100
  
  #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
                                                 LIBAVFILTER_VERSION_MINOR, \
@@@ -411,9 -418,19 +418,19 @@@ static av_cold int init(AVFilterContex
  
  static void null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { }
  
+ static int config_props(AVFilterLink *link)
+ {
+     link->time_base.num = link->src->inputs[0]->time_base.num;
+     link->time_base.den = link->src->inputs[0]->time_base.den * 2;
+     link->w             = link->src->inputs[0]->w;
+     link->h             = link->src->inputs[0]->h;
+     return 0;
+ }
  AVFilter avfilter_vf_yadif = {
      .name          = "yadif",
 -    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image"),
 +    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
  
      .priv_size     = sizeof(YADIFContext),
      .init          = init,
                                      .start_frame      = start_frame,
                                      .get_video_buffer = get_video_buffer,
                                      .draw_slice       = null_draw_slice,
 -                                    .end_frame        = end_frame, },
 +                                    .end_frame        = end_frame,
 +                                    .rej_perms        = AV_PERM_REUSE2, },
                                    { .name = NULL}},
  
 -    .outputs   = (AVFilterPad[]) {{ .name             = "default",
 +    .outputs   = (const AVFilterPad[]) {{ .name       = "default",
                                      .type             = AVMEDIA_TYPE_VIDEO,
                                      .poll_frame       = poll_frame,
-                                     .request_frame    = request_frame, },
+                                     .request_frame    = request_frame,
+                                     .config_props     = config_props, },
                                    { .name = NULL}},
  };
index 0000000,26805fe..86dfd90
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,294 +1,345 @@@
 - * This file is part of Libav.
+ /*
 - * Libav is free software; you can redistribute it and/or
++ * This file is part of FFmpeg.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * License along with Libav; if not, write to the Free Software
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 -                ref->video->pixel_aspect.num, ref->video->pixel_aspect.den,
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ #include "libavutil/imgutils.h"
+ #include "avfilter.h"
+ #include "internal.h"
++static char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms)
++{
++    snprintf(buf, buf_size, "%s%s%s%s%s%s",
++             perms & AV_PERM_READ      ? "r" : "",
++             perms & AV_PERM_WRITE     ? "w" : "",
++             perms & AV_PERM_PRESERVE  ? "p" : "",
++             perms & AV_PERM_REUSE     ? "u" : "",
++             perms & AV_PERM_REUSE2    ? "U" : "",
++             perms & AV_PERM_NEG_LINESIZES ? "n" : "");
++    return buf;
++}
++
+ static void ff_dlog_ref(void *ctx, AVFilterBufferRef *ref, int end)
+ {
+     av_unused char buf[16];
+     av_dlog(ctx,
+             "ref[%p buf:%p refcount:%d perms:%s data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
+             ref, ref->buf, ref->buf->refcount, ff_get_ref_perms_string(buf, sizeof(buf), ref->perms), ref->data[0],
+             ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
+             ref->pts, ref->pos);
+     if (ref->video) {
+         av_dlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
 -/* TODO: set the buffer's priv member to a context structure for the whole
 - * filter chain.  This will allow for a buffer pool instead of the constant
 - * alloc & free cycle currently implemented. */
++                ref->video->sample_aspect_ratio.num, ref->video->sample_aspect_ratio.den,
+                 ref->video->w, ref->video->h,
+                 !ref->video->interlaced     ? 'P' :         /* Progressive  */
+                 ref->video->top_field_first ? 'T' : 'B',    /* Top / Bottom */
+                 ref->video->key_frame,
+                 av_get_picture_type_char(ref->video->pict_type));
+     }
+     if (ref->audio) {
+         av_dlog(ctx, " cl:%"PRId64" n:%d r:%d p:%d",
+                 ref->audio->channel_layout,
+                 ref->audio->nb_samples,
+                 ref->audio->sample_rate,
+                 ref->audio->planar);
+     }
+     av_dlog(ctx, "]%s", end ? "\n" : "");
+ }
+ AVFilterBufferRef *avfilter_null_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+ {
+     return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
+ }
 -    // +2 is needed for swscaler, +16 to be SIMD-friendly
 -    if (av_image_alloc(data, linesize, w, h, link->format, 16) < 0)
+ AVFilterBufferRef *avfilter_default_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+ {
+     int linesize[4];
+     uint8_t *data[4];
++    int i;
+     AVFilterBufferRef *picref = NULL;
++    AVFilterPool *pool = link->pool;
++
++    if (pool) {
++        for (i = 0; i < POOL_SIZE; i++) {
++            picref = pool->pic[i];
++            if (picref && picref->buf->format == link->format && picref->buf->w == w && picref->buf->h == h) {
++                AVFilterBuffer *pic = picref->buf;
++                pool->pic[i] = NULL;
++                pool->count--;
++                picref->video->w = w;
++                picref->video->h = h;
++                picref->perms = perms | AV_PERM_READ;
++                picref->format = link->format;
++                pic->refcount = 1;
++                memcpy(picref->data,     pic->data,     sizeof(picref->data));
++                memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
++                pool->refcount++;
++                return picref;
++            }
++        }
++    } else {
++        pool = link->pool = av_mallocz(sizeof(AVFilterPool));
++        pool->refcount = 1;
++    }
 -avfilter_get_video_buffer_ref_from_arrays(uint8_t *data[4], int linesize[4], int perms,
++    // align: +2 is needed for swscaler, +16 to be SIMD-friendly
++    if ((i = av_image_alloc(data, linesize, w, h, link->format, 32)) < 0)
+         return NULL;
+     picref = avfilter_get_video_buffer_ref_from_arrays(data, linesize,
+                                                        perms, w, h, link->format);
+     if (!picref) {
+         av_free(data[0]);
+         return NULL;
+     }
++    memset(data[0], 128, i);
++
++    picref->buf->priv = pool;
++    picref->buf->free = NULL;
++    pool->refcount++;
++
+     return picref;
+ }
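
Note: the FFmpeg-side hunk above extends avfilter_default_get_video_buffer() with a small fixed-size pool (link->pool, POOL_SIZE slots): a previously released buffer is handed back when its format and dimensions match the request, and a fresh av_image_alloc() happens only on a miss. The stand-alone sketch below illustrates that reuse pattern with made-up names (buf_t, pool_t, pool_get, pool_put); it is only an illustration of the idea, not libavfilter code.

    #include <stdint.h>
    #include <stdlib.h>

    #define POOL_SLOTS 8

    typedef struct buf_t  { int format, w, h; uint8_t *data; } buf_t;
    typedef struct pool_t { buf_t *slot[POOL_SLOTS]; } pool_t;

    /* Hand back a cached buffer when format and size match, else allocate. */
    static buf_t *pool_get(pool_t *p, int format, int w, int h)
    {
        for (int i = 0; i < POOL_SLOTS; i++) {
            buf_t *b = p->slot[i];
            if (b && b->format == format && b->w == w && b->h == h) {
                p->slot[i] = NULL;              /* take it out of the pool */
                return b;
            }
        }
        buf_t *b = calloc(1, sizeof(*b));       /* miss: allocate a new one */
        if (!b)
            return NULL;
        b->format = format;
        b->w      = w;
        b->h      = h;
        b->data   = malloc((size_t)w * h);      /* placeholder payload */
        return b;
    }

    /* Release: keep the buffer for later reuse if a slot is free. */
    static void pool_put(pool_t *p, buf_t *b)
    {
        for (int i = 0; i < POOL_SLOTS; i++) {
            if (!p->slot[i]) {
                p->slot[i] = b;
                return;
            }
        }
        free(b->data);                          /* pool full: really free */
        free(b);
    }
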
+ AVFilterBufferRef *
++avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
+                                           int w, int h, enum PixelFormat format)
+ {
+     AVFilterBuffer *pic = av_mallocz(sizeof(AVFilterBuffer));
+     AVFilterBufferRef *picref = av_mallocz(sizeof(AVFilterBufferRef));
+     if (!pic || !picref)
+         goto fail;
+     picref->buf = pic;
+     picref->buf->free = ff_avfilter_default_free_buffer;
+     if (!(picref->video = av_mallocz(sizeof(AVFilterBufferRefVideoProps))))
+         goto fail;
+     pic->w = picref->video->w = w;
+     pic->h = picref->video->h = h;
+     /* make sure the buffer gets read permission or it's useless for output */
+     picref->perms = perms | AV_PERM_READ;
+     pic->refcount = 1;
+     picref->type = AVMEDIA_TYPE_VIDEO;
+     pic->format = picref->format = format;
+     memcpy(pic->data,        data,          4*sizeof(data[0]));
+     memcpy(pic->linesize,    linesize,      4*sizeof(linesize[0]));
+     memcpy(picref->data,     pic->data,     sizeof(picref->data));
+     memcpy(picref->linesize, pic->linesize, sizeof(picref->linesize));
+     pic->   extended_data = pic->data;
+     picref->extended_data = picref->data;
+     return picref;
+ fail:
+     if (picref && picref->video)
+         av_free(picref->video);
+     av_free(picref);
+     av_free(pic);
+     return NULL;
+ }
+ AVFilterBufferRef *avfilter_get_video_buffer(AVFilterLink *link, int perms, int w, int h)
+ {
+     AVFilterBufferRef *ret = NULL;
+     av_unused char buf[16];
+     FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0);
+     av_dlog(NULL, " perms:%s w:%d h:%d\n", ff_get_ref_perms_string(buf, sizeof(buf), perms), w, h);
+     if (link->dstpad->get_video_buffer)
+         ret = link->dstpad->get_video_buffer(link, perms, w, h);
+     if (!ret)
+         ret = avfilter_default_get_video_buffer(link, perms, w, h);
+     if (ret)
+         ret->type = AVMEDIA_TYPE_VIDEO;
+     FF_DPRINTF_START(NULL, get_video_buffer); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " returning "); ff_dlog_ref(NULL, ret, 1);
+     return ret;
+ }
+ void avfilter_null_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+ {
+     avfilter_start_frame(link->dst->outputs[0], picref);
+ }
+ void avfilter_default_start_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
+ {
+     AVFilterLink *outlink = NULL;
+     if (inlink->dst->output_count)
+         outlink = inlink->dst->outputs[0];
+     if (outlink) {
+         outlink->out_buf = avfilter_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
+         avfilter_copy_buffer_ref_props(outlink->out_buf, picref);
+         avfilter_start_frame(outlink, avfilter_ref_buffer(outlink->out_buf, ~0));
+     }
+ }
+ /* XXX: should we do the duplicating of the picture ref here, instead of
+  * forcing the source filter to do it? */
+ void avfilter_start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
+ {
+     void (*start_frame)(AVFilterLink *, AVFilterBufferRef *);
+     AVFilterPad *dst = link->dstpad;
+     int perms = picref->perms;
++    AVFilterCommand *cmd= link->dst->command_queue;
+     FF_DPRINTF_START(NULL, start_frame); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " "); ff_dlog_ref(NULL, picref, 1);
+     if (!(start_frame = dst->start_frame))
+         start_frame = avfilter_default_start_frame;
+     if (picref->linesize[0] < 0)
+         perms |= AV_PERM_NEG_LINESIZES;
+     /* prepare to copy the picture if it has insufficient permissions */
+     if ((dst->min_perms & perms) != dst->min_perms || dst->rej_perms & perms) {
+         av_log(link->dst, AV_LOG_DEBUG,
+                 "frame copy needed (have perms %x, need %x, reject %x)\n",
+                 picref->perms,
+                 link->dstpad->min_perms, link->dstpad->rej_perms);
+         link->cur_buf = avfilter_get_video_buffer(link, dst->min_perms, link->w, link->h);
+         link->src_buf = picref;
+         avfilter_copy_buffer_ref_props(link->cur_buf, link->src_buf);
+     }
+     else
+         link->cur_buf = picref;
++    while(cmd && cmd->time <= picref->pts * av_q2d(link->time_base)){
++        av_log(link->dst, AV_LOG_DEBUG,
++               "Processing command time:%f command:%s arg:%s\n",
++               cmd->time, cmd->command, cmd->arg);
++        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
++        ff_command_queue_pop(link->dst);
++        cmd= link->dst->command_queue;
++    }
++
+     start_frame(link, link->cur_buf);
++    ff_update_link_current_pts(link, link->cur_buf->pts);
+ }
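
Note: the copy decision in avfilter_start_frame() above is a plain bitmask test: a copy is made when any bit in dst->min_perms is missing from the incoming buffer's permissions, or when any bit in dst->rej_perms is present. A minimal, self-contained illustration of that test follows; the permission values are made up for the example and are not libavfilter's AV_PERM_* constants, only the shape of the test matches the code.

    #include <stdio.h>

    enum { PERM_READ = 1, PERM_WRITE = 2, PERM_PRESERVE = 4 };

    /* Same shape as the check above: (min & have) != min || (rej & have). */
    static int needs_copy(unsigned have, unsigned min, unsigned rej)
    {
        return (min & have) != min || (rej & have);
    }

    int main(void)
    {
        /* incoming buffer is read-only, filter wants to write in place */
        printf("%d\n", needs_copy(PERM_READ, PERM_READ | PERM_WRITE, 0)); /* 1 */
        /* buffer already writable, nothing rejected */
        printf("%d\n", needs_copy(PERM_READ | PERM_WRITE, PERM_WRITE, 0)); /* 0 */
        /* filter rejects buffers it would have to preserve */
        printf("%d\n", needs_copy(PERM_READ | PERM_PRESERVE, PERM_READ,
                                  PERM_PRESERVE));                         /* 1 */
        return 0;
    }
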
+ void avfilter_null_end_frame(AVFilterLink *link)
+ {
+     avfilter_end_frame(link->dst->outputs[0]);
+ }
+ void avfilter_default_end_frame(AVFilterLink *inlink)
+ {
+     AVFilterLink *outlink = NULL;
+     if (inlink->dst->output_count)
+         outlink = inlink->dst->outputs[0];
+     avfilter_unref_buffer(inlink->cur_buf);
+     inlink->cur_buf = NULL;
+     if (outlink) {
+         if (outlink->out_buf) {
+             avfilter_unref_buffer(outlink->out_buf);
+             outlink->out_buf = NULL;
+         }
+         avfilter_end_frame(outlink);
+     }
+ }
+ void avfilter_end_frame(AVFilterLink *link)
+ {
+     void (*end_frame)(AVFilterLink *);
+     if (!(end_frame = link->dstpad->end_frame))
+         end_frame = avfilter_default_end_frame;
+     end_frame(link);
+     /* unreference the source picture if we're feeding the destination filter
+      * a copied version due to permission issues */
+     if (link->src_buf) {
+         avfilter_unref_buffer(link->src_buf);
+         link->src_buf = NULL;
+     }
+ }
+ void avfilter_null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+ {
+     avfilter_draw_slice(link->dst->outputs[0], y, h, slice_dir);
+ }
+ void avfilter_default_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
+ {
+     AVFilterLink *outlink = NULL;
+     if (inlink->dst->output_count)
+         outlink = inlink->dst->outputs[0];
+     if (outlink)
+         avfilter_draw_slice(outlink, y, h, slice_dir);
+ }
+ void avfilter_draw_slice(AVFilterLink *link, int y, int h, int slice_dir)
+ {
+     uint8_t *src[4], *dst[4];
+     int i, j, vsub;
+     void (*draw_slice)(AVFilterLink *, int, int, int);
+     FF_DPRINTF_START(NULL, draw_slice); ff_dlog_link(NULL, link, 0); av_dlog(NULL, " y:%d h:%d dir:%d\n", y, h, slice_dir);
+     /* copy the slice if needed for permission reasons */
+     if (link->src_buf) {
+         vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;
+         for (i = 0; i < 4; i++) {
+             if (link->src_buf->data[i]) {
+                 src[i] = link->src_buf-> data[i] +
+                     (y >> (i==1 || i==2 ? vsub : 0)) * link->src_buf-> linesize[i];
+                 dst[i] = link->cur_buf->data[i] +
+                     (y >> (i==1 || i==2 ? vsub : 0)) * link->cur_buf->linesize[i];
+             } else
+                 src[i] = dst[i] = NULL;
+         }
+         for (i = 0; i < 4; i++) {
+             int planew =
+                 av_image_get_linesize(link->format, link->cur_buf->video->w, i);
+             if (!src[i]) continue;
+             for (j = 0; j < h >> (i==1 || i==2 ? vsub : 0); j++) {
+                 memcpy(dst[i], src[i], planew);
+                 src[i] += link->src_buf->linesize[i];
+                 dst[i] += link->cur_buf->linesize[i];
+             }
+         }
+     }
+     if (!(draw_slice = link->dstpad->draw_slice))
+         draw_slice = avfilter_default_draw_slice;
+     draw_slice(link, y, h, slice_dir);
+ }
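
Note: the slice copy in avfilter_draw_slice() above shifts both the y offset and the row count right by the format's log2 chroma height (vsub) for planes 1 and 2. A small worked example of that arithmetic, assuming a 4:2:0 layout where vsub is 1; this is just the arithmetic, not libavfilter code.

    #include <stdio.h>

    int main(void)
    {
        int vsub = 1;          /* 4:2:0: chroma has half the rows of luma */
        int y = 32, h = 16;    /* luma slice covering rows 32..47 */

        for (int i = 0; i < 3; i++) {
            int shift = (i == 1 || i == 2) ? vsub : 0;
            printf("plane %d: start row %d, %d rows\n",
                   i, y >> shift, h >> shift);
        }
        /* prints: plane 0: 32, 16 rows; planes 1 and 2: 16, 8 rows */
        return 0;
    }
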
   */
  
  #define LIBAVUTIL_VERSION_MAJOR 51
- #define LIBAVUTIL_VERSION_MINOR 52
 -#define LIBAVUTIL_VERSION_MINOR 30
 -#define LIBAVUTIL_VERSION_MICRO  0
++#define LIBAVUTIL_VERSION_MINOR 53
 +#define LIBAVUTIL_VERSION_MICRO 100
  
  #define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
                                                 LIBAVUTIL_VERSION_MINOR, \
Simple merge
@@@ -34,17 -34,11 +34,23 @@@ fate-nellymoser: CMD = pcm -i $(SAMPLES
  fate-nellymoser: CMP = oneoff
  fate-nellymoser: REF = $(SAMPLES)/nellymoser/nellymoser.pcm
  
 -FATE_AVCONV += fate-sierra-vmd-audio
 +FATE_AUDIO += fate-nellymoser-aref-encode
 +fate-nellymoser-aref-encode: $(AREF)
 +fate-nellymoser-aref-encode: CMD = enc_dec_pcm flv wav s16le $(REF) -c:a nellymoser
 +fate-nellymoser-aref-encode: CMP = stddev
 +fate-nellymoser-aref-encode: REF = ./tests/data/acodec-16000-1.ref.wav
 +fate-nellymoser-aref-encode: CMP_SHIFT = -1172
 +fate-nellymoser-aref-encode: CMP_TARGET = 9617
 +fate-nellymoser-aref-encode: SIZE_TOLERANCE = 268
 +
++FATE_AUDIO += fate-sierra-vmd-audio
+ fate-sierra-vmd-audio: CMD = framecrc -i $(SAMPLES)/vmd/12.vmd -vn
 -FATE_AVCONV += fate-smacker-audio
++FATE_AUDIO += fate-smacker-audio
+ fate-smacker-audio: CMD = framecrc -i $(SAMPLES)/smacker/wetlogo.smk -vn
 -FATE_AVCONV += fate-ws_snd
 +FATE_AUDIO += fate-ws_snd
  fate-ws_snd: CMD = md5 -i $(SAMPLES)/vqa/ws_snd.vqa -f s16le
 +
 +FATE_FFMPEG += $(FATE_AUDIO)
 +fate-audio: $(FATE_AUDIO)
@@@ -1,83 -1,74 +1,80 @@@
 -FATE_AVCONV += fate-adts-demux
 +FATE_DEMUX += fate-avio-direct
 +fate-avio-direct: CMD = framecrc -avioflags direct -i $(SAMPLES)/fraps/fraps-v5-bouncing-balls-partial.avi -avioflags direct
 +
 +FATE_DEMUX += fate-adts-demux
  fate-adts-demux: CMD = crc -i $(SAMPLES)/aac/ct_faac-adts.aac -acodec copy
  
 -FATE_AVCONV += fate-aea-demux
 +FATE_DEMUX += fate-aea-demux
  fate-aea-demux: CMD = crc -i $(SAMPLES)/aea/chirp.aea -acodec copy
  
 -FATE_AVCONV += fate-bink-demux
 +FATE_DEMUX += fate-bink-demux
  fate-bink-demux: CMD = crc -i $(SAMPLES)/bink/Snd0a7d9b58.dee -vn -acodec copy
  
 -FATE_AVCONV += fate-caf
 +FATE_DEMUX += fate-caf
  fate-caf: CMD = crc -i $(SAMPLES)/caf/caf-pcm16.caf -c copy
  
 -FATE_AVCONV += fate-cdxl-demux
 +FATE_DEMUX += fate-cdxl-demux
  fate-cdxl-demux: CMD = framecrc -i $(SAMPLES)/cdxl/mirage.cdxl -vcodec copy -acodec copy
  
 -FATE_AVCONV += fate-d-cinema-demux
 +FATE_DEMUX += fate-d-cinema-demux
  fate-d-cinema-demux: CMD = framecrc -i $(SAMPLES)/d-cinema/THX_Science_FLT_1920-partial.302 -acodec copy
  
 -FATE_AVCONV += fate-iv8-demux
 +FATE_DEMUX += fate-iv8-demux
  fate-iv8-demux: CMD = framecrc -i $(SAMPLES)/iv8/zzz-partial.mpg -vcodec copy
  
 -FATE_AVCONV += fate-lmlm4-demux
 +FATE_DEMUX += fate-lmlm4-demux
  fate-lmlm4-demux: CMD = framecrc -i $(SAMPLES)/lmlm4/LMLM4_CIFat30fps.divx -t 3 -acodec copy -vcodec copy
  
 -FATE_AVCONV += fate-maxis-xa
 +FATE_DEMUX += fate-maxis-xa
  fate-maxis-xa: CMD = framecrc -i $(SAMPLES)/maxis-xa/SC2KBUG.XA -frames:a 30 -c:a copy
  
 -FATE_AVCONV += fate-mtv
 +FATE_DEMUX += fate-mtv
  fate-mtv: CMD = framecrc -i $(SAMPLES)/mtv/comedian_auto-partial.mtv -c copy
  
 -FATE_AVCONV += fate-mxf-demux
 +FATE_DEMUX += fate-mxf-demux
  fate-mxf-demux: CMD = framecrc -i $(SAMPLES)/mxf/C0023S01.mxf -acodec copy -vcodec copy
  
 -FATE_AVCONV += fate-nc-demux
 +FATE_DEMUX += fate-nc-demux
  fate-nc-demux: CMD = framecrc -i $(SAMPLES)/nc-camera/nc-sample-partial -vcodec copy
  
 -FATE_AVCONV += fate-nsv-demux
 +FATE_DEMUX += fate-nsv-demux
  fate-nsv-demux: CMD = framecrc -i $(SAMPLES)/nsv/witchblade-51kbps.nsv -t 6 -vcodec copy -acodec copy
  
 -FATE_AVCONV += fate-oma-demux
 +FATE_DEMUX += fate-oma-demux
  fate-oma-demux: CMD = crc -i $(SAMPLES)/oma/01-Untitled-partial.oma -acodec copy
  
 -FATE_AVCONV += fate-psx-str
 +FATE_DEMUX += fate-psx-str
  fate-psx-str: CMD = framecrc -i $(SAMPLES)/psx-str/descent-partial.str
  
 -FATE_AVCONV += fate-psx-str-v3-mdec
 +FATE_DEMUX += fate-psx-str-v3-mdec
  fate-psx-str-v3-mdec: CMD = framecrc -i $(SAMPLES)/psx-str/abc000_cut.str -an
  
 -FATE_AVCONV += fate-pva-demux
 -fate-pva-demux: CMD = framecrc -idct simple -i $(SAMPLES)/pva/PVA_test-partial.pva -t 0.6 -acodec copy -vn
 +FATE_DEMUX += fate-pva-demux
 +fate-pva-demux: CMD = framecrc -idct simple -i $(SAMPLES)/pva/PVA_test-partial.pva -t 0.6 -acodec copy
  
 -FATE_AVCONV += fate-qcp-demux
 +FATE_DEMUX += fate-qcp-demux
  fate-qcp-demux: CMD = crc -i $(SAMPLES)/qcp/0036580847.QCP -acodec copy
  
 -FATE_AVCONV += fate-redcode-demux
 +FATE_DEMUX += fate-redcode-demux
  fate-redcode-demux: CMD = framecrc -i $(SAMPLES)/r3d/4MB-sample.r3d -vcodec copy -acodec copy
  
- FATE_DEMUX += fate-sierra-vmd
- fate-sierra-vmd: CMD = framecrc -i $(SAMPLES)/vmd/12.vmd -pix_fmt rgb24
 -FATE_AVCONV += fate-siff
 +FATE_DEMUX += fate-siff
  fate-siff: CMD = framecrc -i $(SAMPLES)/SIFF/INTRO_B.VB -t 3 -pix_fmt rgb24
  
- FATE_DEMUX += fate-smjpeg
- fate-smjpeg: CMD = framecrc -i $(SAMPLES)/smjpeg/scenwin.mjpg -vcodec copy
 -FATE_AVCONV += fate-smjpeg-demux
++FATE_DEMUX += fate-smjpeg-demux
+ fate-smjpeg-demux: CMD = framecrc -i $(SAMPLES)/smjpeg/scenwin.mjpg -c copy
  
 -FATE_AVCONV += fate-westwood-aud
 +FATE_DEMUX += fate-westwood-aud
  fate-westwood-aud: CMD = framecrc -i $(SAMPLES)/westwood-aud/excellent.aud -c copy
  
 -FATE_AVCONV += fate-wtv-demux
 +FATE_DEMUX += fate-wtv-demux
  fate-wtv-demux: CMD = framecrc -i $(SAMPLES)/wtv/law-and-order-partial.wtv -vcodec copy -acodec copy
  
 -FATE_AVCONV += fate-xmv-demux
 +FATE_DEMUX += fate-xmv-demux
  fate-xmv-demux: CMD = framecrc -i $(SAMPLES)/xmv/logos1p.fmv -vcodec copy -acodec copy
  
 -FATE_AVCONV += fate-xwma-demux
 +FATE_DEMUX += fate-xwma-demux
  fate-xwma-demux: CMD = crc -i $(SAMPLES)/xwma/ergon.xwma -acodec copy
 +
 +FATE_FFMPEG += $(FATE_DEMUX)
 +fate-demux: $(FATE_DEMUX)
@@@ -161,40 -161,43 +161,43 @@@ FATE_VIDEO += fate-mpeg2-field-en
  fate-mpeg2-field-enc: CMD = framecrc -flags +bitexact -dct fastint -idct simple -i $(SAMPLES)/mpeg2/mpeg2_field_encoding.ts -an
  
  # FIXME dropped frames in this test because of coarse timebase
 -FATE_AVCONV += fate-nuv
 +FATE_VIDEO += fate-nuv
  fate-nuv: CMD = framecrc -idct simple -i $(SAMPLES)/nuv/Today.nuv -an
  
 -FATE_AVCONV += fate-qpeg
 +FATE_VIDEO += fate-qpeg
  fate-qpeg: CMD = framecrc -i $(SAMPLES)/qpeg/Clock.avi -an -pix_fmt rgb24
  
 -FATE_AVCONV += fate-r210
 +FATE_VIDEO += fate-r210
  fate-r210: CMD = framecrc -i $(SAMPLES)/r210/r210.avi -pix_fmt rgb48le
  
 -FATE_AVCONV += fate-rl2
 +FATE_VIDEO += fate-rl2
  fate-rl2: CMD = framecrc -i $(SAMPLES)/rl2/Z4915300.RL2 -pix_fmt rgb24 -an
  
 -FATE_AVCONV += fate-roqvideo
 +FATE_VIDEO += fate-roqvideo
  fate-roqvideo: CMD = framecrc -i $(SAMPLES)/idroq/idlogo.roq -an
  
- FATE_VIDEO += fate-smacker
- fate-smacker: CMD = framecrc -i $(SAMPLES)/smacker/wetlogo.smk -pix_fmt rgb24
 -FATE_AVCONV += fate-sierra-vmd-video
++FATE_VIDEO += fate-sierra-vmd-video
+ fate-sierra-vmd-video: CMD = framecrc -i $(SAMPLES)/vmd/12.vmd -pix_fmt rgb24 -an
 -FATE_AVCONV += fate-smacker-video
++FATE_VIDEO += fate-smacker-video
+ fate-smacker-video: CMD = framecrc -i $(SAMPLES)/smacker/wetlogo.smk -pix_fmt rgb24 -an
  
 -FATE_AVCONV += fate-smc
 +FATE_VIDEO += fate-smc
  fate-smc: CMD = framecrc -i $(SAMPLES)/smc/cass_schi.qt -pix_fmt rgb24
  
 -FATE_AVCONV += fate-sp5x
 +FATE_VIDEO += fate-sp5x
  fate-sp5x: CMD = framecrc -idct simple -i $(SAMPLES)/sp5x/sp5x_problem.avi
  
 -FATE_AVCONV += fate-sub-srt
 +FATE_VIDEO += fate-sub-srt
  fate-sub-srt: CMD = md5 -i $(SAMPLES)/sub/SubRip_capability_tester.srt -f ass
  
 -FATE_AVCONV += fate-thp
 +FATE_VIDEO += fate-thp
  fate-thp: CMD = framecrc -idct simple -i $(SAMPLES)/thp/pikmin2-opening1-partial.thp -an
  
 -FATE_AVCONV += fate-tiertex-seq
 +FATE_VIDEO += fate-tiertex-seq
  fate-tiertex-seq: CMD = framecrc -i $(SAMPLES)/tiertex-seq/Gameover.seq -pix_fmt rgb24
  
 -FATE_AVCONV += fate-tmv
 +FATE_VIDEO += fate-tmv
  fate-tmv: CMD = framecrc -i $(SAMPLES)/tmv/pop-partial.tmv -pix_fmt rgb24
  
  FATE_TXD += fate-txd-16bpp
  
  #include "libavformat/avformat.h"
  #include "libavutil/pixdesc.h"
 +#include "libavutil/samplefmt.h"
  #include "libavfilter/avfilter.h"
  
-             fmts = filter_ctx->inout##puts[i]->outin##_chlayouts;       \
 +static void print_formats(AVFilterContext *filter_ctx)
 +{
 +    int i, j;
 +
 +#define PRINT_FMTS(inout, outin, INOUT)                                 \
 +    for (i = 0; i < filter_ctx->inout##put_count; i++) {                     \
 +        if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_VIDEO) {   \
 +            AVFilterFormats *fmts =                                     \
 +                filter_ctx->inout##puts[i]->outin##_formats;            \
 +            for (j = 0; j < fmts->format_count; j++)                    \
 +                if(av_get_pix_fmt_name(fmts->formats[j]))               \
 +                printf(#INOUT "PUT[%d] %s: fmt:%s\n",                   \
 +                       i, filter_ctx->filter->inout##puts[i].name,      \
 +                       av_get_pix_fmt_name(fmts->formats[j]));          \
 +        } else if (filter_ctx->inout##puts[i]->type == AVMEDIA_TYPE_AUDIO) { \
 +            AVFilterFormats *fmts;                                      \
 +                                                                        \
 +            fmts = filter_ctx->inout##puts[i]->outin##_formats;         \
 +            for (j = 0; j < fmts->format_count; j++)                    \
 +                printf(#INOUT "PUT[%d] %s: fmt:%s\n",                   \
 +                       i, filter_ctx->filter->inout##puts[i].name,      \
 +                       av_get_sample_fmt_name(fmts->formats[j]));       \
 +                                                                        \
++            fmts = filter_ctx->inout##puts[i]->outin##_channel_layouts; \
 +            for (j = 0; j < fmts->format_count; j++) {                  \
 +                char buf[256];                                          \
 +                av_get_channel_layout_string(buf, sizeof(buf), -1,      \
 +                                             fmts->formats[j]);         \
 +                printf(#INOUT "PUT[%d] %s: chlayout:%s\n",              \
 +                       i, filter_ctx->filter->inout##puts[i].name, buf); \
 +            }                                                           \
 +                                                                        \
 +            fmts = filter_ctx->inout##puts[i]->outin##_packing;         \
 +            for (j = 0; j < fmts->format_count; j++) {                  \
 +                printf(#INOUT "PUT[%d] %s: packing:%s\n",               \
 +                       i, filter_ctx->filter->inout##puts[i].name,      \
 +                       fmts->formats[j] == AVFILTER_PACKED ?            \
 +                                           "packed" : "planar");        \
 +            }                                                           \
 +        }                                                               \
 +    }                                                                   \
 +
 +    PRINT_FMTS(in,  out, IN);
 +    PRINT_FMTS(out, in,  OUT);
 +}
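
Note: the PRINT_FMTS macro above relies on av_get_channel_layout_string() to turn a 64-bit channel mask into a readable name. A minimal stand-alone use of that helper is sketched below; it assumes a libavutil of roughly this vintage, where the declaration lives in libavutil/audioconvert.h, and it must be linked against libavutil.

    #include <stdio.h>
    #include "libavutil/audioconvert.h"

    int main(void)
    {
        char buf[256];
        /* AV_CH_LAYOUT_STEREO is a 64-bit channel mask; passing -1 lets the
         * helper derive the channel count from the mask itself. */
        av_get_channel_layout_string(buf, sizeof(buf), -1, AV_CH_LAYOUT_STEREO);
        printf("%s\n", buf);   /* typically prints "stereo" */
        return 0;
    }
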
 +
  int main(int argc, char **argv)
  {
      AVFilter *filter;