From: Derek Buitenhuis Date: Mon, 18 Apr 2016 14:22:24 +0000 (+0100) Subject: Merge commit '98114d70e48caf871b0fe9b8e5bf8ebd989b845d' X-Git-Tag: android-x86-7.1-r1~5545 X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=34245eccaf99885e12259c1de00a3af3acfed45a;p=android-x86%2Fexternal-ffmpeg.git Merge commit '98114d70e48caf871b0fe9b8e5bf8ebd989b845d' * commit '98114d70e48caf871b0fe9b8e5bf8ebd989b845d': lavf: VAAPI scale filter Merged-by: Derek Buitenhuis --- 34245eccaf99885e12259c1de00a3af3acfed45a diff --cc Changelog index 6e40739cc9,4a7d2a117b..827e3e255c --- a/Changelog +++ b/Changelog @@@ -2,196 -2,17 +2,197 @@@ Entries are sorted chronologically fro releases are sorted from youngest to oldest. version : -- aliases and defaults for Ogg subtypes (opus, spx) -- HEVC/H.265 RTP payload format (draft v6) packetizer and depacketizer -- avplay now exits by default at the end of playback -- XCB-based screen-grabber -- creating DASH compatible fragmented MP4, MPEG-DASH segmenting muxer -- H.261 RTP payload format (RFC 4587) depacketizer and experimental packetizer +- DXVA2-accelerated HEVC Main10 decoding +- fieldhint filter +- loop video filter and aloop audio filter +- Bob Weaver deinterlacing filter +- firequalizer filter +- datascope filter +- bench and abench filters +- ciescope filter +- protocol blacklisting API +- MediaCodec H264 decoding +- VC-2 HQ RTP payload format (draft v1) depacketizer +- AudioToolbox audio decoders +- AudioToolbox audio encoders +- coreimage filter (GPU based image filtering on OSX) +- libdcadec removed +- bitstream filter for extracting DTS core +- ADPCM IMA DAT4 decoder +- musx demuxer +- aix demuxer +- remap filter +- hash and framehash muxers +- colorspace filter +- hdcd filter +- readvitc filter ++- VAAPI-accelerated format conversion and scaling + +version 3.0: +- Common Encryption (CENC) MP4 encoding and decoding support +- DXV decoding +- extrastereo filter +- ocr filter +- alimiter filter +- stereowiden filter +- stereotools filter +- rubberband filter +- tremolo filter +- agate filter +- chromakey filter +- maskedmerge filter +- Screenpresso SPV1 decoding +- chromaprint fingerprinting muxer +- ffplay dynamic volume control +- displace filter +- selectivecolor filter +- extensive native AAC encoder improvements and removal of experimental flag +- ADPCM PSX decoder +- 3dostr, dcstr, fsb, genh, vag, xvag, ads, msf, svag & vpk demuxer +- zscale filter +- wve demuxer +- zero-copy Intel QSV transcoding in ffmpeg +- shuffleframes filter +- SDX2 DPCM decoder +- vibrato filter +- innoHeim/Rsupport Screen Capture Codec decoder +- ADPCM AICA decoder +- Interplay ACM demuxer and audio decoder +- XMA1 & XMA2 decoder +- realtime filter +- anoisesrc audio filter source +- IVR demuxer +- compensationdelay filter +- acompressor filter +- support encoding 16-bit RLE SGI images +- apulsator filter +- sidechaingate audio filter +- mipsdspr1 option has been renamed to mipsdsp +- aemphasis filter +- mips32r5 option has been removed +- mips64r6 option has been removed +- DXVA2-accelerated VP9 decoding +- SOFAlizer: virtual binaural acoustics filter +- VAAPI VP9 hwaccel +- audio high-order multiband parametric equalizer +- automatic bitstream filtering +- showspectrumpic filter +- libstagefright support removed +- spectrumsynth filter +- ahistogram filter +- only seek with the right mouse button in ffplay +- toggle full screen when double-clicking with the left mouse button in ffplay +- afftfilt filter +- convolution filter +- libquvi support removed +- support for 
dvaudio in wav and avi +- libaacplus and libvo-aacenc support removed +- Cineform HD decoder +- new DCA decoder with full support for DTS-HD extensions +- significant performance improvements in Windows Television (WTV) demuxer +- nnedi deinterlacer +- streamselect video and astreamselect audio filter +- swaprect filter +- metadata video and ametadata audio filter +- SMPTE VC-2 HQ profile support for the Dirac decoder +- SMPTE VC-2 native encoder supporting the HQ profile + + +version 2.8: +- colorkey video filter +- BFSTM/BCSTM demuxer +- little-endian ADPCM_THP decoder +- Hap decoder and encoder +- DirectDraw Surface image/texture decoder +- ssim filter +- optional new ASF demuxer +- showvolume filter +- Many improvements to the JPEG 2000 decoder +- Go2Meeting decoding support +- adrawgraph audio and drawgraph video filter +- removegrain video filter +- Intel QSV-accelerated MPEG-2 video and HEVC encoding +- Intel QSV-accelerated MPEG-2 video and HEVC decoding +- Intel QSV-accelerated VC-1 video decoding +- libkvazaar HEVC encoder +- erosion, dilation, deflate and inflate video filters +- Dynamic Audio Normalizer as dynaudnorm filter +- Reverse video and areverse audio filter +- Random filter +- deband filter +- AAC fixed-point decoding +- sidechaincompress audio filter +- bitstream filter for converting HEVC from MP4 to Annex B +- acrossfade audio filter +- allyuv and allrgb video sources +- atadenoise video filter +- OS X VideoToolbox support +- aphasemeter filter +- showfreqs filter +- vectorscope filter +- waveform filter +- hstack and vstack filter +- Support DNx100 (1440x1080@8) +- VAAPI hevc hwaccel +- VDPAU hevc hwaccel +- framerate filter +- Switched default encoders for webm to VP9 and Opus +- Removed experimental flag from the JPEG 2000 encoder + + +version 2.7: +- FFT video filter +- TDSC decoder +- DTS lossless extension (XLL) decoding (not lossless, disabled by default) +- showwavespic filter +- DTS decoding through libdcadec +- Drop support for nvenc API before 5.0 +- nvenc HEVC encoder +- Detelecine filter +- Intel QSV-accelerated H.264 encoding +- MMAL-accelerated H.264 decoding +- basic APNG encoder and muxer with default extension "apng" +- unpack DivX-style packed B-frames in MPEG-4 bitstream filter +- WebM Live Chunk Muxer +- nvenc level and tier options +- chorus filter +- Canopus HQ/HQA decoder +- Automatically rotate videos based on metadata in ffmpeg +- improved Quickdraw compatibility +- VP9 high bit-depth and extended colorspaces decoding support +- WebPAnimEncoder API when available for encoding and muxing WebP +- Direct3D11-accelerated decoding +- Support Secure Transport +- Multipart JPEG demuxer + + +version 2.6: +- nvenc encoder +- 10bit spp filter +- colorlevels filter +- RIFX format for *.wav files - RTP/mpegts muxer -- VP8 in Ogg demuxing +- non continuous cache protocol support +- tblend filter +- cropdetect support for non 8bpp, absolute (if limit >= 1) and relative (if limit < 1.0) threshold +- Camellia symmetric block cipher - OpenH264 encoder wrapper +- VOC seeking support +- Closed caption Decoder +- fspp, uspp, pp7 MPlayer postprocessing filters ported to native filters +- showpalette filter +- Twofish symmetric block cipher - Support DNx100 (960x720@8) -- Direct3D11-accelerated decoding +- eq2 filter ported from libmpcodecs as eq filter +- removed libmpcodecs +- Changed default DNxHD colour range in QuickTime .mov derivatives to mpeg range +- ported softpulldown filter from libmpcodecs as repeatfields filter +- dcshift filter +- RTP depacketizer 
for loss tolerant payload format for MP3 audio (RFC 5219) +- RTP depacketizer for AC3 payload format (RFC 4184) +- palettegen and paletteuse filters +- VP9 RTP payload format (draft 0) experimental depacketizer +- RTP depacketizer for DV (RFC 6469) - DXVA2-accelerated HEVC decoding - AAC ELD 480 decoding - Intel QSV-accelerated H.264 decoding diff --cc configure index 84a68ef26c,f016ebd170..cb3d304f49 --- a/configure +++ b/configure @@@ -2948,60 -2356,10 +2948,61 @@@ histeq_filter_deps="gpl hqdn3d_filter_deps="gpl" hwupload_cuda_filter_deps="cuda" interlace_filter_deps="gpl" +kerndeint_filter_deps="gpl" +ladspa_filter_deps="ladspa dlopen" +mcdeint_filter_deps="avcodec gpl" +movie_filter_deps="avcodec avformat" +mpdecimate_filter_deps="gpl" +mpdecimate_filter_select="pixelutils" +mptestsrc_filter_deps="gpl" +negate_filter_deps="lut_filter" +nnedi_filter_deps="gpl" +ocr_filter_deps="libtesseract" ocv_filter_deps="libopencv" +owdenoise_filter_deps="gpl" +pan_filter_deps="swresample" +perspective_filter_deps="gpl" +phase_filter_deps="gpl" +pp7_filter_deps="gpl" +pp_filter_deps="gpl postproc" +pullup_filter_deps="gpl" +removelogo_filter_deps="avcodec avformat swscale" +repeatfields_filter_deps="gpl" resample_filter_deps="avresample" +rubberband_filter_deps="librubberband" +sab_filter_deps="gpl swscale" +scale2ref_filter_deps="swscale" scale_filter_deps="swscale" +select_filter_select="pixelutils" +showcqt_filter_deps="avcodec avformat swscale" +showcqt_filter_select="fft" +showfreqs_filter_deps="avcodec" +showfreqs_filter_select="fft" +showspectrum_filter_deps="avcodec" +showspectrum_filter_select="fft" +showspectrumpic_filter_deps="avcodec" +showspectrumpic_filter_select="fft" +smartblur_filter_deps="gpl swscale" +sofalizer_filter_deps="netcdf avcodec" +sofalizer_filter_select="fft" +spectrumsynth_filter_deps="avcodec" +spectrumsynth_filter_select="fft" +spp_filter_deps="gpl avcodec" +spp_filter_select="fft idctdsp fdctdsp me_cmp pixblockdsp" +stereo3d_filter_deps="gpl" +subtitles_filter_deps="avformat avcodec libass" +super2xsai_filter_deps="gpl" +pixfmts_super2xsai_test_deps="super2xsai_filter" +tinterlace_filter_deps="gpl" +tinterlace_merge_test_deps="tinterlace_filter" +tinterlace_pad_test_deps="tinterlace_filter" +uspp_filter_deps="gpl avcodec" +vidstabdetect_filter_deps="libvidstab" +vidstabtransform_filter_deps="libvidstab" +zmq_filter_deps="libzmq" +zoompan_filter_deps="swscale" +zscale_filter_deps="libzimg" + scale_vaapi_filter_deps="vaapi VAProcPipelineParameterBuffer" # examples avcodec_example_deps="avcodec avutil" @@@ -5448,13 -4455,10 +5449,14 @@@ check_lib "CoreServices/CoreServices.h check_struct "sys/time.h sys/resource.h" "struct rusage" ru_maxrss check_type "windows.h dxva.h" "DXVA_PicParams_HEVC" -DWINAPI_FAMILY=WINAPI_FAMILY_DESKTOP_APP -D_CRT_BUILD_DESKTOP_APP=0 +check_type "windows.h dxva.h" "DXVA_PicParams_VP9" -DWINAPI_FAMILY=WINAPI_FAMILY_DESKTOP_APP -D_CRT_BUILD_DESKTOP_APP=0 check_type "windows.h d3d11.h" "ID3D11VideoDecoder" +check_type "windows.h d3d11.h" "ID3D11VideoContext" check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602 +check_type "va/va.h" "VAPictureParameterBufferHEVC" +check_type "va/va.h" "VADecPictureParameterBufferVP9" + check_type "va/va.h va/va_vpp.h" "VAProcPipelineParameterBuffer" check_type "vdpau/vdpau.h" "VdpPictureInfoHEVC" diff --cc libavfilter/Makefile index f1f37cb093,0f9643d9f5..d71a17b790 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@@ -199,51 -63,17 +199,52 @@@ OBJS-$(CONFIG_LUT3D_FILTER 
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o +OBJS-$(CONFIG_MASKEDMERGE_FILTER) += vf_maskedmerge.o framesync.o +OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o +OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o +OBJS-$(CONFIG_METADATA_FILTER) += f_metadata.o +OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o +OBJS-$(CONFIG_NNEDI_FILTER) += vf_nnedi.o OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o +OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o OBJS-$(CONFIG_NULL_FILTER) += vf_null.o +OBJS-$(CONFIG_OCR_FILTER) += vf_ocr.o OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o -OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o +OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o +OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesync.o +OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o +OBJS-$(CONFIG_PALETTEGEN_FILTER) += vf_palettegen.o +OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o dualinput.o framesync.o +OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o +OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o +OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o +OBJS-$(CONFIG_PP_FILTER) += vf_pp.o +OBJS-$(CONFIG_PP7_FILTER) += vf_pp7.o +OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o +OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o +OBJS-$(CONFIG_QP_FILTER) += vf_qp.o +OBJS-$(CONFIG_RANDOM_FILTER) += vf_random.o +OBJS-$(CONFIG_READVITC_FILTER) += vf_readvitc.o +OBJS-$(CONFIG_REALTIME_FILTER) += f_realtime.o +OBJS-$(CONFIG_REMAP_FILTER) += vf_remap.o framesync.o +OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += vf_removegrain.o +OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o +OBJS-$(CONFIG_REPEATFIELDS_FILTER) += vf_repeatfields.o +OBJS-$(CONFIG_REVERSE_FILTER) += f_reverse.o +OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o +OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o +OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o +OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o +OBJS-$(CONFIG_SELECT_FILTER) += f_select.o +OBJS-$(CONFIG_SELECTIVECOLOR_FILTER) += vf_selectivecolor.o +OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o + OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o -OBJS-$(CONFIG_SELECT_FILTER) += vf_select.o OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o +OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o OBJS-$(CONFIG_SETPTS_FILTER) += setpts.o OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o OBJS-$(CONFIG_SETTB_FILTER) += settb.o diff --cc libavfilter/allfilters.c index ad6ba21da5,d76db06f2a..7f58c7e132 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@@ -220,50 -89,17 +220,51 @@@ void avfilter_register_all(void REGISTER_FILTER(LUT, lut, vf); REGISTER_FILTER(LUTRGB, lutrgb, vf); REGISTER_FILTER(LUTYUV, lutyuv, vf); + REGISTER_FILTER(MASKEDMERGE, maskedmerge, vf); + REGISTER_FILTER(MCDEINT, mcdeint, vf); + REGISTER_FILTER(MERGEPLANES, mergeplanes, vf); + REGISTER_FILTER(METADATA, metadata, vf); + REGISTER_FILTER(MPDECIMATE, mpdecimate, vf); REGISTER_FILTER(NEGATE, negate, vf); + REGISTER_FILTER(NNEDI, nnedi, vf); REGISTER_FILTER(NOFORMAT, noformat, vf); + REGISTER_FILTER(NOISE, noise, vf); REGISTER_FILTER(NULL, null, vf); + REGISTER_FILTER(OCR, ocr, vf); REGISTER_FILTER(OCV, ocv, vf); REGISTER_FILTER(OVERLAY, overlay, vf); + REGISTER_FILTER(OWDENOISE, owdenoise, vf); REGISTER_FILTER(PAD, pad, vf); + 
REGISTER_FILTER(PALETTEGEN, palettegen, vf); + REGISTER_FILTER(PALETTEUSE, paletteuse, vf); + REGISTER_FILTER(PERMS, perms, vf); + REGISTER_FILTER(PERSPECTIVE, perspective, vf); + REGISTER_FILTER(PHASE, phase, vf); REGISTER_FILTER(PIXDESCTEST, pixdesctest, vf); + REGISTER_FILTER(PP, pp, vf); + REGISTER_FILTER(PP7, pp7, vf); + REGISTER_FILTER(PSNR, psnr, vf); + REGISTER_FILTER(PULLUP, pullup, vf); + REGISTER_FILTER(QP, qp, vf); + REGISTER_FILTER(RANDOM, random, vf); + REGISTER_FILTER(READVITC, readvitc, vf); + REGISTER_FILTER(REALTIME, realtime, vf); + REGISTER_FILTER(REMAP, remap, vf); + REGISTER_FILTER(REMOVEGRAIN, removegrain, vf); + REGISTER_FILTER(REMOVELOGO, removelogo, vf); + REGISTER_FILTER(REPEATFIELDS, repeatfields, vf); + REGISTER_FILTER(REVERSE, reverse, vf); + REGISTER_FILTER(ROTATE, rotate, vf); + REGISTER_FILTER(SAB, sab, vf); REGISTER_FILTER(SCALE, scale, vf); + REGISTER_FILTER(SCALE2REF, scale2ref, vf); + REGISTER_FILTER(SCALE_VAAPI, scale_vaapi, vf); REGISTER_FILTER(SELECT, select, vf); + REGISTER_FILTER(SELECTIVECOLOR, selectivecolor, vf); + REGISTER_FILTER(SENDCMD, sendcmd, vf); + REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf); REGISTER_FILTER(SETDAR, setdar, vf); + REGISTER_FILTER(SETFIELD, setfield, vf); REGISTER_FILTER(SETPTS, setpts, vf); REGISTER_FILTER(SETSAR, setsar, vf); REGISTER_FILTER(SETTB, settb, vf); diff --cc libavfilter/version.h index f4ade1cf15,1ca0f37a6a..927ec27e55 --- a/libavfilter/version.h +++ b/libavfilter/version.h @@@ -29,9 -29,9 +29,9 @@@ #include "libavutil/version.h" -#define LIBAVFILTER_VERSION_MAJOR 6 -#define LIBAVFILTER_VERSION_MINOR 3 -#define LIBAVFILTER_VERSION_MICRO 0 +#define LIBAVFILTER_VERSION_MAJOR 6 +#define LIBAVFILTER_VERSION_MINOR 43 - #define LIBAVFILTER_VERSION_MICRO 100 ++#define LIBAVFILTER_VERSION_MICRO 101 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ LIBAVFILTER_VERSION_MINOR, \ diff --cc libavfilter/vf_scale_vaapi.c index 0000000000,d5c1847aad..f7eb89182e mode 000000,100644..100644 --- a/libavfilter/vf_scale_vaapi.c +++ b/libavfilter/vf_scale_vaapi.c @@@ -1,0 -1,460 +1,460 @@@ + /* - * This file is part of Libav. ++ * This file is part of FFmpeg. + * - * Libav is free software; you can redistribute it and/or ++ * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * - * Libav is distributed in the hope that it will be useful, ++ * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #include <string.h>
+
+ #include <va/va.h>
+ #include <va/va_vpp.h>
+
+ #include "libavutil/avassert.h"
+ #include "libavutil/hwcontext.h"
+ #include "libavutil/hwcontext_vaapi.h"
+ #include "libavutil/mem.h"
+ #include "libavutil/opt.h"
+ #include "libavutil/pixdesc.h"
+
+ #include "avfilter.h"
+ #include "formats.h"
+ #include "internal.h"
+
+ typedef struct ScaleVAAPIContext {
+     const AVClass *class;
+
+     AVVAAPIDeviceContext *hwctx;
+     AVBufferRef *device_ref;
+
+     int valid_ids;
+     VAConfigID va_config;
+     VAContextID va_context;
+
+     AVBufferRef *input_frames_ref;
+     AVHWFramesContext *input_frames;
+
+     AVBufferRef *output_frames_ref;
+     AVHWFramesContext *output_frames;
+
+     char *output_format_string;
+     enum AVPixelFormat output_format;
+     int output_width;
+     int output_height;
+
+ } ScaleVAAPIContext;
+
+
+ static int scale_vaapi_query_formats(AVFilterContext *avctx)
+ {
+     enum AVPixelFormat pix_fmts[] = {
+         AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
+     };
+
+     ff_formats_ref(ff_make_format_list(pix_fmts),
+                    &avctx->inputs[0]->out_formats);
+     ff_formats_ref(ff_make_format_list(pix_fmts),
+                    &avctx->outputs[0]->in_formats);
+
+     return 0;
+ }
+
+ static int scale_vaapi_pipeline_uninit(ScaleVAAPIContext *ctx)
+ {
+     if (ctx->va_context != VA_INVALID_ID) {
+         vaDestroyContext(ctx->hwctx->display, ctx->va_context);
+         ctx->va_context = VA_INVALID_ID;
+     }
+
+     if (ctx->va_config != VA_INVALID_ID) {
+         vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
+         ctx->va_config = VA_INVALID_ID;
+     }
+
+     av_buffer_unref(&ctx->output_frames_ref);
+     av_buffer_unref(&ctx->device_ref);
+     ctx->hwctx = 0;
+
+     return 0;
+ }
+
+ static int scale_vaapi_config_input(AVFilterLink *inlink)
+ {
+     AVFilterContext *avctx = inlink->dst;
+     ScaleVAAPIContext *ctx = avctx->priv;
+
+     scale_vaapi_pipeline_uninit(ctx);
+
+     if (!inlink->hw_frames_ctx) {
+         av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
+                "required to associate the processing device.\n");
+         return AVERROR(EINVAL);
+     }
+
+     ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
+     ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;
+
+     return 0;
+ }
+
+ static int scale_vaapi_config_output(AVFilterLink *outlink)
+ {
+     AVFilterContext *avctx = outlink->src;
+     ScaleVAAPIContext *ctx = avctx->priv;
+     AVVAAPIHWConfig *hwconfig = NULL;
+     AVHWFramesConstraints *constraints = NULL;
+     AVVAAPIFramesContext *va_frames;
+     VAStatus vas;
+     int err, i;
+
+     scale_vaapi_pipeline_uninit(ctx);
+
+     ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
+     ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx;
+
+     av_assert0(ctx->va_config == VA_INVALID_ID);
+     vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone,
+                          VAEntrypointVideoProc, 0, 0, &ctx->va_config);
+     if (vas != VA_STATUS_SUCCESS) {
+         av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
+                "config: %d (%s).\n", vas, vaErrorStr(vas));
+         err = AVERROR(EIO);
+         goto fail;
+     }
+
+     hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
+     if (!hwconfig) {
+         err = AVERROR(ENOMEM);
+         goto fail;
+     }
+     hwconfig->config_id = ctx->va_config;
+
+     constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
+                                                       hwconfig);
+     if (!constraints) {
+         err = AVERROR(ENOMEM);
+         goto fail;
+     }
+
+     if (ctx->output_format == AV_PIX_FMT_NONE)
+         ctx->output_format = ctx->input_frames->sw_format;
+     if (constraints->valid_sw_formats) {
+         for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
+             if (ctx->output_format == constraints->valid_sw_formats[i])
+                 break;
+         }
+         if (constraints->valid_sw_formats[i] == AV_PIX_FMT_NONE) {
+             av_log(ctx, AV_LOG_ERROR, "Hardware does not support output "
+                    "format %s.\n", av_get_pix_fmt_name(ctx->output_format));
+             err = AVERROR(EINVAL);
+             goto fail;
+         }
+     }
+
+     if (ctx->output_width < constraints->min_width ||
+         ctx->output_height < constraints->min_height ||
+         ctx->output_width > constraints->max_width ||
+         ctx->output_height > constraints->max_height) {
+         av_log(ctx, AV_LOG_ERROR, "Hardware does not support scaling to "
+                "size %dx%d (constraints: width %d-%d height %d-%d).\n",
+                ctx->output_width, ctx->output_height,
+                constraints->min_width, constraints->max_width,
+                constraints->min_height, constraints->max_height);
+         err = AVERROR(EINVAL);
+         goto fail;
+     }
+
+     ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
+     if (!ctx->output_frames_ref) {
+         av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
+                "for output.\n");
+         err = AVERROR(ENOMEM);
+         goto fail;
+     }
+
+     ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data;
+
+     ctx->output_frames->format = AV_PIX_FMT_VAAPI;
+     ctx->output_frames->sw_format = ctx->output_format;
+     ctx->output_frames->width = ctx->output_width;
+     ctx->output_frames->height = ctx->output_height;
+
+     // The number of output frames we need is determined by what follows
+     // the filter. If it's an encoder with complex frame reference
+     // structures then this could be very high.
+     ctx->output_frames->initial_pool_size = 10;
+
+     err = av_hwframe_ctx_init(ctx->output_frames_ref);
+     if (err < 0) {
+         av_log(ctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame "
+                "context for output: %d\n", err);
+         goto fail;
+     }
+
+     va_frames = ctx->output_frames->hwctx;
+
+     av_assert0(ctx->va_context == VA_INVALID_ID);
+     vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
+                           ctx->output_width, ctx->output_height,
+                           VA_PROGRESSIVE,
+                           va_frames->surface_ids, va_frames->nb_surfaces,
+                           &ctx->va_context);
+     if (vas != VA_STATUS_SUCCESS) {
+         av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline "
+                "context: %d (%s).\n", vas, vaErrorStr(vas));
+         return AVERROR(EIO);
+     }
+
+     outlink->w = ctx->output_width;
+     outlink->h = ctx->output_height;
+
+     outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref);
+     if (!outlink->hw_frames_ctx) {
+         err = AVERROR(ENOMEM);
+         goto fail;
+     }
+
+     av_freep(&hwconfig);
+     av_hwframe_constraints_free(&constraints);
+     return 0;
+
+ fail:
+     av_buffer_unref(&ctx->output_frames_ref);
+     av_freep(&hwconfig);
+     av_hwframe_constraints_free(&constraints);
+     return err;
+ }
+
+ static int vaapi_proc_colour_standard(enum AVColorSpace av_cs)
+ {
+     switch(av_cs) {
+ #define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
+         CS(BT709, BT709);
+         CS(BT470BG, BT601);
+         CS(SMPTE170M, SMPTE170M);
+         CS(SMPTE240M, SMPTE240M);
+ #undef CS
+     default:
+         return VAProcColorStandardNone;
+     }
+ }
+
+ static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
+ {
+     AVFilterContext *avctx = inlink->dst;
+     AVFilterLink *outlink = avctx->outputs[0];
+     ScaleVAAPIContext *ctx = avctx->priv;
+     AVFrame *output_frame = NULL;
+     VASurfaceID input_surface, output_surface;
+     VAProcPipelineParameterBuffer params;
+     VABufferID params_id;
+     VAStatus vas;
+     int err;
+
AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n", + av_get_pix_fmt_name(input_frame->format), + input_frame->width, input_frame->height, input_frame->pts); + + if (ctx->va_context == VA_INVALID_ID) + return AVERROR(EINVAL); + + input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3]; + av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale input.\n", + input_surface); + + output_frame = av_frame_alloc(); + if (!output_frame) { + av_log(ctx, AV_LOG_ERROR, "Failed to allocate output frame."); + err = AVERROR(ENOMEM); + goto fail; + } + + err = av_hwframe_get_buffer(ctx->output_frames_ref, output_frame, 0); + if (err < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to get surface for " + "output: %d\n.", err); + } + + output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3]; + av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale output.\n", + output_surface); + + memset(¶ms, 0, sizeof(params)); + + params.surface = input_surface; + params.surface_region = 0; + params.surface_color_standard = + vaapi_proc_colour_standard(input_frame->colorspace); + + params.output_region = 0; + params.output_background_color = 0xff000000; + params.output_color_standard = params.surface_color_standard; + + params.pipeline_flags = 0; + params.filter_flags = VA_FILTER_SCALING_HQ; + + vas = vaBeginPicture(ctx->hwctx->display, + ctx->va_context, output_surface); + if (vas != VA_STATUS_SUCCESS) { + av_log(ctx, AV_LOG_ERROR, "Failed to attach new picture: " + "%d (%s).\n", vas, vaErrorStr(vas)); + err = AVERROR(EIO); + goto fail; + } + + vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, + VAProcPipelineParameterBufferType, + sizeof(params), 1, ¶ms, ¶ms_id); + if (vas != VA_STATUS_SUCCESS) { + av_log(ctx, AV_LOG_ERROR, "Failed to create parameter buffer: " + "%d (%s).\n", vas, vaErrorStr(vas)); + err = AVERROR(EIO); + goto fail_after_begin; + } + av_log(ctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n", + params_id); + + vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context, + ¶ms_id, 1); + if (vas != VA_STATUS_SUCCESS) { + av_log(ctx, AV_LOG_ERROR, "Failed to render parameter buffer: " + "%d (%s).\n", vas, vaErrorStr(vas)); + err = AVERROR(EIO); + goto fail_after_begin; + } + + vas = vaEndPicture(ctx->hwctx->display, ctx->va_context); + if (vas != VA_STATUS_SUCCESS) { + av_log(ctx, AV_LOG_ERROR, "Failed to start picture processing: " + "%d (%s).\n", vas, vaErrorStr(vas)); + err = AVERROR(EIO); + goto fail_after_render; + } + + // This doesn't get freed automatically for some reason. + vas = vaDestroyBuffer(ctx->hwctx->display, params_id); + if (vas != VA_STATUS_SUCCESS) { + av_log(ctx, AV_LOG_ERROR, "Failed to free parameter buffer: " + "%d (%s).\n", vas, vaErrorStr(vas)); + err = AVERROR(EIO); + goto fail; + } + + av_frame_copy_props(output_frame, input_frame); + av_frame_free(&input_frame); + + av_log(ctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n", + av_get_pix_fmt_name(output_frame->format), + output_frame->width, output_frame->height, output_frame->pts); + + return ff_filter_frame(outlink, output_frame); + + // We want to make sure that if vaBeginPicture has been called, we also + // call vaRenderPicture and vaEndPicture. These calls may well fail or + // do something else nasty, but once we're in this failure case there + // isn't much else we can do. 
+ fail_after_begin:
+     vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1);
+ fail_after_render:
+     vaEndPicture(ctx->hwctx->display, ctx->va_context);
+ fail:
+     av_frame_free(&input_frame);
+     av_frame_free(&output_frame);
+     return err;
+ }
+
+ static av_cold int scale_vaapi_init(AVFilterContext *avctx)
+ {
+     ScaleVAAPIContext *ctx = avctx->priv;
+
+     ctx->va_config = VA_INVALID_ID;
+     ctx->va_context = VA_INVALID_ID;
+     ctx->valid_ids = 1;
+
+     if (ctx->output_format_string) {
+         ctx->output_format = av_get_pix_fmt(ctx->output_format_string);
+         if (ctx->output_format == AV_PIX_FMT_NONE) {
+             av_log(ctx, AV_LOG_ERROR, "Invalid output format.\n");
+             return AVERROR(EINVAL);
+         }
+     } else {
+         // Use the input format once that is configured.
+         ctx->output_format = AV_PIX_FMT_NONE;
+     }
+
+     return 0;
+ }
+
+ static av_cold void scale_vaapi_uninit(AVFilterContext *avctx)
+ {
+     ScaleVAAPIContext *ctx = avctx->priv;
+
+     if (ctx->valid_ids)
+         scale_vaapi_pipeline_uninit(ctx);
+
+     av_buffer_unref(&ctx->input_frames_ref);
+     av_buffer_unref(&ctx->output_frames_ref);
+     av_buffer_unref(&ctx->device_ref);
+ }
+
+
+ #define OFFSET(x) offsetof(ScaleVAAPIContext, x)
+ #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM)
+ static const AVOption scale_vaapi_options[] = {
+     { "w", "Output video width",
+       OFFSET(output_width), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
+     { "h", "Output video height",
+       OFFSET(output_height), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
+     { "format", "Output video format (software format of hardware frames)",
+       OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS },
+     { NULL },
+ };
+
+ static const AVClass scale_vaapi_class = {
+     .class_name = "scale_vaapi",
+     .item_name = av_default_item_name,
+     .option = scale_vaapi_options,
+     .version = LIBAVUTIL_VERSION_INT,
+ };
+
+ static const AVFilterPad scale_vaapi_inputs[] = {
+     {
+         .name = "default",
+         .type = AVMEDIA_TYPE_VIDEO,
+         .filter_frame = &scale_vaapi_filter_frame,
+         .config_props = &scale_vaapi_config_input,
+     },
+     { NULL }
+ };
+
+ static const AVFilterPad scale_vaapi_outputs[] = {
+     {
+         .name = "default",
+         .type = AVMEDIA_TYPE_VIDEO,
+         .config_props = &scale_vaapi_config_output,
+     },
+     { NULL }
+ };
+
+ AVFilter ff_vf_scale_vaapi = {
+     .name = "scale_vaapi",
+     .description = NULL_IF_CONFIG_SMALL("Scale to/from VAAPI surfaces."),
+     .priv_size = sizeof(ScaleVAAPIContext),
+     .init = &scale_vaapi_init,
+     .uninit = &scale_vaapi_uninit,
+     .query_formats = &scale_vaapi_query_formats,
+     .inputs = scale_vaapi_inputs,
+     .outputs = scale_vaapi_outputs,
+     .priv_class = &scale_vaapi_class,
+ };
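
Usage sketch (not part of the merged diff): scale_vaapi only negotiates AV_PIX_FMT_VAAPI, so frames must reach it as VAAPI surfaces, typically by decoding with the VAAPI hwaccel and keeping the decoder output in hardware. The command below is a minimal illustration; it assumes a build that also carries the VAAPI hwaccel and h264_vaapi encoder from the same merge series, a DRM render node at /dev/dri/renderD128, and placeholder file names in.mp4/out.mp4. The w, h and format options map directly to the AVOption table above; option spellings may differ slightly at this exact revision.

    ffmpeg -vaapi_device /dev/dri/renderD128 \
           -hwaccel vaapi -hwaccel_output_format vaapi \
           -i in.mp4 \
           -vf scale_vaapi=w=1280:h=720:format=nv12 \
           -c:v h264_vaapi out.mp4

Requesting format=nv12 keeps the output surfaces in the sw_format most widely supported by VAAPI video processing; omitting it makes scale_vaapi_config_output fall back to the input sw_format, as the code above shows.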