* limitations under the License.
*/
-//#define LOG_NDEBUG 0
#define LOG_TAG "SoftFFmpegVideo"
#include <utils/Log.h>
#define DEBUG_PKT 0
#define DEBUG_FRM 0
+#define DEBUG_EXTRADATA 0
static int decoder_reorder_pts = -1;
namespace android {
+static const CodecProfileLevel kM4VProfileLevels[] = {
+ { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level5 },
+ { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level5 },
+};
+
SoftFFmpegVideo::SoftFFmpegVideo(
const char *name,
const char *componentRole,
OMX_VIDEO_CODINGTYPE codingType,
+ const CodecProfileLevel *profileLevels,
+ size_t numProfileLevels,
const OMX_CALLBACKTYPE *callbacks,
OMX_PTR appData,
OMX_COMPONENTTYPE **component,
enum AVCodecID codecID)
: SoftVideoDecoderOMXComponent(name, componentRole, codingType,
- NULL, 0, 320, 240, callbacks, appData, component),
+ profileLevels, numProfileLevels, 352, 288, callbacks, appData, component),
mCodingType(codingType),
mFFmpegAlreadyInited(false),
mCodecAlreadyOpened(false),
initPorts(
kNumInputBuffers,
- 8192 /* inputBufferSize */,
+ 1024 * 1024 /* inputBufferSize */,
kNumOutputBuffers,
name);
int fast = 1;
avctx->workaround_bugs = 1;
- avctx->lowres = 0;
- if(avctx->lowres > codec->max_lowres){
- ALOGW("The maximum value for lowres supported by the decoder is %d",
- codec->max_lowres);
- avctx->lowres= codec->max_lowres;
- }
avctx->idct_algo = 0;
avctx->skip_frame = AVDISCARD_DEFAULT;
avctx->skip_idct = AVDISCARD_DEFAULT;
avctx->error_concealment = 3;
avctx->thread_count = 0;
- if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
- if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
- if(codec->capabilities & CODEC_CAP_DR1)
+ if (fast) avctx->flags2 |= AV_CODEC_FLAG2_FAST;
+#ifdef CODEC_FLAG_EMU_EDGE
+ if (codec->capabilities & AV_CODEC_CAP_DR1)
avctx->flags |= CODEC_FLAG_EMU_EDGE;
+#endif
}
status_t SoftFFmpegVideo::initDecoder(enum AVCodecID codecID) {
mCtx->codec_id = codecID;
mCtx->extradata_size = 0;
mCtx->extradata = NULL;
+ mCtx->width = mWidth;
+ mCtx->height = mHeight;
return OK;
}
}
if (mCodecAlreadyOpened) {
avcodec_close(mCtx);
- av_free(mCtx);
- mCtx = NULL;
+ mCodecAlreadyOpened = false;
}
+ av_free(mCtx);
+ mCtx = NULL;
}
if (mFrame) {
- av_freep(&mFrame);
+ av_frame_free(&mFrame);
mFrame = NULL;
}
if (mImgConvertCtx) {
BufferInfo *inInfo = *inQueue.begin();
OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+#if DEBUG_EXTRADATA
ALOGI("got extradata, ignore: %d, size: %u",
mIgnoreExtradata, inHeader->nFilledLen);
hexdump(inHeader->pBuffer + inHeader->nOffset, inHeader->nFilledLen);
+#endif
if (mIgnoreExtradata) {
ALOGI("got extradata, size: %u, but ignore it", inHeader->nFilledLen);
int orig_extradata_size = mCtx->extradata_size;
mCtx->extradata_size += inHeader->nFilledLen;
mCtx->extradata = (uint8_t *)realloc(mCtx->extradata,
- mCtx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ mCtx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
if (!mCtx->extradata) {
ALOGE("ffmpeg video decoder failed to alloc extradata memory.");
return ERR_OOM;
inHeader->pBuffer + inHeader->nOffset,
inHeader->nFilledLen);
memset(mCtx->extradata + mCtx->extradata_size, 0,
- FF_INPUT_BUFFER_PADDING_SIZE);
+ AV_INPUT_BUFFER_PADDING_SIZE);
}
}
}
if (!mExtradataReady) {
+#if DEBUG_EXTRADATA
ALOGI("extradata is ready, size: %d", mCtx->extradata_size);
hexdump(mCtx->extradata, mCtx->extradata_size);
+#endif
mExtradataReady = true;
}
int len = 0, err = 0;
int gotPic = false;
int32_t ret = ERR_OK;
- bool is_flush = (mEOSStatus != INPUT_DATA_AVAILABLE);
List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
BufferInfo *inInfo = NULL;
OMX_BUFFERHEADERTYPE *inHeader = NULL;
- if (!is_flush) {
+ if (!inQueue.empty()) {
inInfo = *inQueue.begin();
- CHECK(inInfo != NULL);
- inHeader = inInfo->mHeader;
+ if (inInfo != NULL) {
+ inHeader = inInfo->mHeader;
+ }
+ }
+
+ if (mEOSStatus == INPUT_EOS_SEEN && (!inHeader || inHeader->nFilledLen == 0)
+ && !(mCtx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
+ return ERR_FLUSHED;
}
AVPacket pkt;
initPacket(&pkt, inHeader);
- av_frame_unref(mFrame);
-
err = avcodec_decode_video2(mCtx, mFrame, &gotPic, &pkt);
+ av_packet_unref(&pkt);
if (err < 0) {
ALOGE("ffmpeg video decoder failed to decode frame. (%d)", err);
if (!gotPic) {
ALOGI("ffmpeg video decoder failed to get frame.");
//stop sending empty packets if the decoder is finished
- if (is_flush && mCtx->codec->capabilities & CODEC_CAP_DELAY) {
+        if (mEOSStatus != INPUT_DATA_AVAILABLE && (mCtx->codec->capabilities & AV_CODEC_CAP_DELAY) &&
+                (!inHeader || inHeader->nFilledLen == 0)) {
ret = ERR_FLUSHED;
} else {
ret = ERR_NO_FRM;
}
}
- if (!is_flush) {
+ if (!inQueue.empty()) {
inQueue.erase(inQueue.begin());
- inInfo->mOwnedByUs = false;
- notifyEmptyBufferDone(inHeader);
+ if (inInfo) {
+ inInfo->mOwnedByUs = false;
+ notifyEmptyBufferDone(inHeader);
+ }
}
return ret;
BufferInfo *outInfo = *outQueue.begin();
OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
- AVPicture pict;
+ uint8_t *data[4];
+ int linesize[4];
+
int64_t pts = AV_NOPTS_VALUE;
uint8_t *dst = outHeader->pBuffer;
uint32_t width = outputBufferWidth();
uint32_t height = outputBufferHeight();
- memset(&pict, 0, sizeof(AVPicture));
- pict.data[0] = dst;
- pict.data[1] = dst + width * height;
- pict.data[2] = pict.data[1] + (width / 2 * height / 2);
- pict.linesize[0] = width;
- pict.linesize[1] = width / 2;
- pict.linesize[2] = width / 2;
+ data[0] = dst;
+ data[1] = dst + width * height;
+ data[2] = data[1] + (width / 2 * height / 2);
+ linesize[0] = width;
+ linesize[1] = width / 2;
+ linesize[2] = width / 2;
ALOGV("drainOneOutputBuffer: frame_width=%d frame_height=%d width=%d height=%d ctx_width=%d ctx_height=%d", mFrame->width, mFrame->height, width, height, mCtx->width, mCtx->height);
int sws_flags = SWS_BICUBIC;
mImgConvertCtx = sws_getCachedContext(mImgConvertCtx,
mFrame->width, mFrame->height, (AVPixelFormat)mFrame->format, width, height,
- PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
+ AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
if (mImgConvertCtx == NULL) {
ALOGE("Cannot initialize the conversion context");
return ERR_SWS_FAILED;
}
sws_scale(mImgConvertCtx, mFrame->data, mFrame->linesize,
- 0, height, pict.data, pict.linesize);
+            0, mFrame->height, data, linesize);
outHeader->nOffset = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
}
//process timestamps
+#ifndef LIBAV_CONFIG_H
if (decoder_reorder_pts == -1) {
pts = av_frame_get_best_effort_timestamp(mFrame);
- } else if (decoder_reorder_pts) {
+ } else
+#endif
+ if (decoder_reorder_pts) {
pts = mFrame->pkt_pts;
} else {
pts = mFrame->pkt_dts;
return;
}
- if(!(mCtx->codec->capabilities & CODEC_CAP_DELAY)) {
- drainEOSOutputBuffer();
- mEOSStatus = OUTPUT_FRAMES_FLUSHED;
- return;
- }
-
while (!outQueue.empty()) {
int32_t err = decodeVideo();
if (err < ERR_OK) {
} else if (err == ERR_FLUSHED) {
drainEOSOutputBuffer();
return;
+ } else if (err == ERR_NO_FRM) {
+ continue;
} else {
CHECK_EQ(err, ERR_OK);
}
continue;
}
+ if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+ mEOSStatus = INPUT_EOS_SEEN;
+ continue;
+ }
+
if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
ALOGD("ffmpeg got codecconfig buffer");
if (handleExtradata() != ERR_OK) {
continue;
}
- if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
- mEOSStatus = INPUT_EOS_SEEN;
- }
-
if (!mCodecAlreadyOpened) {
if (openDecoder() != ERR_OK) {
notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
void SoftFFmpegVideo::onReset() {
ALOGV("onReset()");
+ enum AVCodecID codecID = mCtx->codec_id;
+ deInitDecoder();
+ initDecoder(codecID);
SoftVideoDecoderOMXComponent::onReset();
mSignalledError = false;
mExtradataReady = false;
+ mEOSStatus = INPUT_DATA_AVAILABLE;
}
SoftOMXComponent* SoftFFmpegVideo::createSoftOMXComponent(
TRESPASS();
}
+ if (!strcmp(name, "OMX.ffmpeg.mpeg4.decoder")) {
+ return new SoftFFmpegVideo(name, componentRole, codingType,
+ kM4VProfileLevels, ARRAY_SIZE(kM4VProfileLevels),
+ callbacks, appData, component, codecID);
+ }
+
return new SoftFFmpegVideo(name, componentRole, codingType,
- callbacks, appData, component, codecID);
+ NULL, 0,
+ callbacks, appData, component, codecID);
}
} // namespace android