OSDN Git Service

Adds audio support to DirectRenderer.
authorAndreas Huber <andih@google.com>
Wed, 13 Mar 2013 17:22:46 +0000 (10:22 -0700)
committerAndreas Huber <andih@google.com>
Wed, 13 Mar 2013 20:42:14 +0000 (13:42 -0700)
Change-Id: Ibf4df90aca29d638215e2da9b39e78bf3a2c4d08

media/libstagefright/wifi-display/sink/DirectRenderer.cpp
media/libstagefright/wifi-display/sink/DirectRenderer.h
media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp

index 5efcd17..12338e9 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <gui/SurfaceComposerClient.h>
 #include <gui/Surface.h>
+#include <media/AudioTrack.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 
 namespace android {
 
-DirectRenderer::DirectRenderer(
-        const sp<IGraphicBufferProducer> &bufferProducer)
-    : mSurfaceTex(bufferProducer),
-      mVideoDecoderNotificationPending(false),
-      mRenderPending(false),
-      mTimeOffsetUs(0ll),
-      mLatencySum(0ll),
-      mLatencyCount(0),
-      mNumFramesLate(0),
-      mNumFrames(0) {
-}
+/*
+   Drives the decoding process using a MediaCodec instance. Input buffers
+   queued by calls to "queueInputBuffer" are fed to the decoder as soon
+   as the decoder is ready for them; the client is notified about output
+   buffers as the decoder produces them.
+*/
+struct DirectRenderer::DecoderContext : public AHandler {
+    enum {
+        kWhatOutputBufferReady,
+    };
+    DecoderContext(const sp<AMessage> &notify);
 
-DirectRenderer::~DirectRenderer() {
-    if (mVideoDecoder != NULL) {
-        mVideoDecoder->release();
-        mVideoDecoder.clear();
+    status_t init(
+            const sp<AMessage> &format,
+            const sp<IGraphicBufferProducer> &surfaceTex);
 
-        mVideoDecoderLooper->stop();
-        mVideoDecoderLooper.clear();
-    }
-}
+    void queueInputBuffer(const sp<ABuffer> &accessUnit);
 
-void DirectRenderer::setTimeOffset(int64_t offset) {
-    mTimeOffsetUs = offset;
-}
+    status_t renderOutputBufferAndRelease(size_t index);
+    status_t releaseOutputBuffer(size_t index);
 
-int64_t DirectRenderer::getAvgLatenessUs() {
-    if (mLatencyCount == 0) {
-        return 0ll;
-    }
+protected:
+    virtual ~DecoderContext();
 
-    int64_t avgLatencyUs = mLatencySum / mLatencyCount;
+    virtual void onMessageReceived(const sp<AMessage> &msg);
 
-    mLatencySum = 0ll;
-    mLatencyCount = 0;
+private:
+    enum {
+        kWhatDecoderNotify,
+    };
 
-    if (mNumFrames > 0) {
-        ALOGI("%d / %d frames late", mNumFramesLate, mNumFrames);
-        mNumFramesLate = 0;
-        mNumFrames = 0;
-    }
+    sp<AMessage> mNotify;
+    sp<ALooper> mDecoderLooper;
+    sp<MediaCodec> mDecoder;
+    Vector<sp<ABuffer> > mDecoderInputBuffers;
+    Vector<sp<ABuffer> > mDecoderOutputBuffers;
+    List<size_t> mDecoderInputBuffersAvailable;
+    bool mDecoderNotificationPending;
 
-    return avgLatencyUs;
-}
+    List<sp<ABuffer> > mAccessUnits;
 
-void DirectRenderer::onMessageReceived(const sp<AMessage> &msg) {
-    switch (msg->what()) {
-        case kWhatVideoDecoderNotify:
-        {
-            onVideoDecoderNotify();
-            break;
-        }
+    void onDecoderNotify();
+    void scheduleDecoderNotification();
+    void queueDecoderInputBuffers();
 
-        case kWhatRender:
-        {
-            onRender();
-            break;
-        }
+    void queueOutputBuffer(
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
 
-        default:
-            TRESPASS();
-    }
+    DISALLOW_EVIL_CONSTRUCTORS(DecoderContext);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/*
+   A "push" audio renderer. The primary function of this renderer is to use
+   an AudioTrack in push mode while making sure not to block the event loop
+   by ensuring that calls to AudioTrack::write never block. This is done by
+   estimating an upper bound of data that can be written to the AudioTrack
+   buffer without delay.
+*/
+struct DirectRenderer::AudioRenderer : public AHandler {
+    AudioRenderer(const sp<DecoderContext> &decoderContext);
+
+    void queueInputBuffer(
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+protected:
+    virtual ~AudioRenderer();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatPushAudio,
+    };
+
+    struct BufferInfo {
+        size_t mIndex;
+        int64_t mTimeUs;
+        sp<ABuffer> mBuffer;
+    };
+
+    sp<DecoderContext> mDecoderContext;
+    sp<AudioTrack> mAudioTrack;
+
+    List<BufferInfo> mInputBuffers;
+    bool mPushPending;
+
+    size_t mNumFramesWritten;
+
+    void schedulePushIfNecessary();
+    void onPushAudio();
+
+    ssize_t writeNonBlocking(const uint8_t *data, size_t size);
+
+    DISALLOW_EVIL_CONSTRUCTORS(AudioRenderer);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::DecoderContext::DecoderContext(const sp<AMessage> &notify)
+    : mNotify(notify),
+      mDecoderNotificationPending(false) {
 }
 
-void DirectRenderer::setFormat(
-        size_t trackIndex, const sp<AMessage> &format) {
-    if (trackIndex == 1) {
-        // Ignore audio for now.
-        return;
+DirectRenderer::DecoderContext::~DecoderContext() {
+    if (mDecoder != NULL) {
+        mDecoder->release();
+        mDecoder.clear();
+
+        mDecoderLooper->stop();
+        mDecoderLooper.clear();
     }
+}
 
-    CHECK(mVideoDecoder == NULL);
+status_t DirectRenderer::DecoderContext::init(
+        const sp<AMessage> &format,
+        const sp<IGraphicBufferProducer> &surfaceTex) {
+    CHECK(mDecoder == NULL);
 
     AString mime;
     CHECK(format->findString("mime", &mime));
 
-    mVideoDecoderLooper = new ALooper;
-    mVideoDecoderLooper->setName("video codec looper");
+    mDecoderLooper = new ALooper;
+    mDecoderLooper->setName("video codec looper");
 
-    mVideoDecoderLooper->start(
+    mDecoderLooper->start(
             false /* runOnCallingThread */,
             false /* canCallJava */,
             PRIORITY_DEFAULT);
 
-    mVideoDecoder = MediaCodec::CreateByType(
-            mVideoDecoderLooper, mime.c_str(), false /* encoder */);
+    mDecoder = MediaCodec::CreateByType(
+            mDecoderLooper, mime.c_str(), false /* encoder */);
 
-    CHECK(mVideoDecoder != NULL);
+    CHECK(mDecoder != NULL);
 
-    status_t err = mVideoDecoder->configure(
+    status_t err = mDecoder->configure(
             format,
-            mSurfaceTex == NULL
-                ? NULL : new Surface(mSurfaceTex),
+            surfaceTex == NULL
+                ? NULL : new Surface(surfaceTex),
             NULL /* crypto */,
             0 /* flags */);
     CHECK_EQ(err, (status_t)OK);
 
-    err = mVideoDecoder->start();
+    err = mDecoder->start();
     CHECK_EQ(err, (status_t)OK);
 
-    err = mVideoDecoder->getInputBuffers(
-            &mVideoDecoderInputBuffers);
+    err = mDecoder->getInputBuffers(
+            &mDecoderInputBuffers);
     CHECK_EQ(err, (status_t)OK);
 
-    scheduleVideoDecoderNotification();
+    err = mDecoder->getOutputBuffers(
+            &mDecoderOutputBuffers);
+    CHECK_EQ(err, (status_t)OK);
+
+    scheduleDecoderNotification();
+
+    return OK;
 }
 
-void DirectRenderer::queueAccessUnit(
-        size_t trackIndex, const sp<ABuffer> &accessUnit) {
-    if (trackIndex == 1) {
-        // Ignore audio for now.
-        return;
-    }
+void DirectRenderer::DecoderContext::queueInputBuffer(
+        const sp<ABuffer> &accessUnit) {
+    CHECK(mDecoder != NULL);
 
-    if (mVideoDecoder == NULL) {
-        sp<AMessage> format = new AMessage;
-        format->setString("mime", "video/avc");
-        format->setInt32("width", 640);
-        format->setInt32("height", 360);
+    mAccessUnits.push_back(accessUnit);
+    queueDecoderInputBuffers();
+}
 
-        setFormat(0, format);
-    }
+status_t DirectRenderer::DecoderContext::renderOutputBufferAndRelease(
+        size_t index) {
+    return mDecoder->renderOutputBufferAndRelease(index);
+}
 
-    mVideoAccessUnits.push_back(accessUnit);
-    queueVideoDecoderInputBuffers();
+status_t DirectRenderer::DecoderContext::releaseOutputBuffer(size_t index) {
+    return mDecoder->releaseOutputBuffer(index);
 }
 
-void DirectRenderer::queueVideoDecoderInputBuffers() {
-    if (mVideoDecoder == NULL) {
+void DirectRenderer::DecoderContext::queueDecoderInputBuffers() {
+    if (mDecoder == NULL) {
         return;
     }
 
     bool submittedMore = false;
 
-    while (!mVideoAccessUnits.empty()
-            && !mVideoDecoderInputBuffersAvailable.empty()) {
-        size_t index = *mVideoDecoderInputBuffersAvailable.begin();
+    while (!mAccessUnits.empty()
+            && !mDecoderInputBuffersAvailable.empty()) {
+        size_t index = *mDecoderInputBuffersAvailable.begin();
 
-        mVideoDecoderInputBuffersAvailable.erase(
-                mVideoDecoderInputBuffersAvailable.begin());
+        mDecoderInputBuffersAvailable.erase(
+                mDecoderInputBuffersAvailable.begin());
 
-        sp<ABuffer> srcBuffer = *mVideoAccessUnits.begin();
-        mVideoAccessUnits.erase(mVideoAccessUnits.begin());
+        sp<ABuffer> srcBuffer = *mAccessUnits.begin();
+        mAccessUnits.erase(mAccessUnits.begin());
 
         const sp<ABuffer> &dstBuffer =
-            mVideoDecoderInputBuffers.itemAt(index);
+            mDecoderInputBuffers.itemAt(index);
 
         memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
 
         int64_t timeUs;
         CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
 
-        status_t err = mVideoDecoder->queueInputBuffer(
+        status_t err = mDecoder->queueInputBuffer(
                 index,
                 0 /* offset */,
                 srcBuffer->size(),
@@ -198,19 +248,33 @@ void DirectRenderer::queueVideoDecoderInputBuffers() {
     }
 
     if (submittedMore) {
-        scheduleVideoDecoderNotification();
+        scheduleDecoderNotification();
+    }
+}
+
+void DirectRenderer::DecoderContext::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDecoderNotify:
+        {
+            onDecoderNotify();
+            break;
+        }
+
+        default:
+            TRESPASS();
     }
 }
 
-void DirectRenderer::onVideoDecoderNotify() {
-    mVideoDecoderNotificationPending = false;
+void DirectRenderer::DecoderContext::onDecoderNotify() {
+    mDecoderNotificationPending = false;
 
     for (;;) {
         size_t index;
-        status_t err = mVideoDecoder->dequeueInputBuffer(&index);
+        status_t err = mDecoder->dequeueInputBuffer(&index);
 
         if (err == OK) {
-            mVideoDecoderInputBuffersAvailable.push_back(index);
+            mDecoderInputBuffersAvailable.push_back(index);
         } else if (err == -EAGAIN) {
             break;
         } else {
@@ -218,7 +282,7 @@ void DirectRenderer::onVideoDecoderNotify() {
         }
     }
 
-    queueVideoDecoderInputBuffers();
+    queueDecoderInputBuffers();
 
     for (;;) {
         size_t index;
@@ -226,7 +290,7 @@ void DirectRenderer::onVideoDecoderNotify() {
         size_t size;
         int64_t timeUs;
         uint32_t flags;
-        status_t err = mVideoDecoder->dequeueOutputBuffer(
+        status_t err = mDecoder->dequeueOutputBuffer(
                 &index,
                 &offset,
                 &size,
@@ -234,9 +298,12 @@ void DirectRenderer::onVideoDecoderNotify() {
                 &flags);
 
         if (err == OK) {
-            queueOutputBuffer(index, timeUs);
+            queueOutputBuffer(
+                    index, timeUs, mDecoderOutputBuffers.itemAt(index));
         } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
-            // We don't care.
+            err = mDecoder->getOutputBuffers(
+                    &mDecoderOutputBuffers);
+            CHECK_EQ(err, (status_t)OK);
         } else if (err == INFO_FORMAT_CHANGED) {
             // We don't care.
         } else if (err == -EAGAIN) {
@@ -246,48 +313,315 @@ void DirectRenderer::onVideoDecoderNotify() {
         }
     }
 
-    scheduleVideoDecoderNotification();
+    scheduleDecoderNotification();
 }
 
-void DirectRenderer::queueOutputBuffer(size_t index, int64_t timeUs) {
-#if 1
-    OutputInfo info;
+void DirectRenderer::DecoderContext::scheduleDecoderNotification() {
+    if (mDecoderNotificationPending) {
+        return;
+    }
+
+    sp<AMessage> notify =
+        new AMessage(kWhatDecoderNotify, id());
+
+    mDecoder->requestActivityNotification(notify);
+    mDecoderNotificationPending = true;
+}
+
+void DirectRenderer::DecoderContext::queueOutputBuffer(
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatOutputBufferReady);
+    msg->setSize("index", index);
+    msg->setInt64("timeUs", timeUs);
+    msg->setBuffer("buffer", buffer);
+    msg->post();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::AudioRenderer::AudioRenderer(
+        const sp<DecoderContext> &decoderContext)
+    : mDecoderContext(decoderContext),
+      mPushPending(false),
+      mNumFramesWritten(0) {
+    mAudioTrack = new AudioTrack(
+            AUDIO_STREAM_DEFAULT,
+            48000.0f,
+            AUDIO_FORMAT_PCM,
+            AUDIO_CHANNEL_OUT_STEREO,
+            (int)0 /* frameCount */);
+
+    CHECK_EQ((status_t)OK, mAudioTrack->initCheck());
+
+    mAudioTrack->start();
+}
+
+DirectRenderer::AudioRenderer::~AudioRenderer() {
+}
+
+void DirectRenderer::AudioRenderer::queueInputBuffer(
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    BufferInfo info;
     info.mIndex = index;
-    info.mTimeUs = timeUs + mTimeOffsetUs;
-    mOutputBuffers.push_back(info);
+    info.mTimeUs = timeUs;
+    info.mBuffer = buffer;
 
-    scheduleRenderIfNecessary();
-#else
-    mLatencySum += ALooper::GetNowUs() - (timeUs + mTimeOffsetUs);
-    ++mLatencyCount;
+    mInputBuffers.push_back(info);
+    schedulePushIfNecessary();
+}
 
-    status_t err = mVideoDecoder->renderOutputBufferAndRelease(index);
-    CHECK_EQ(err, (status_t)OK);
-#endif
+void DirectRenderer::AudioRenderer::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatPushAudio:
+        {
+            onPushAudio();
+            break;
+        }
+
+        default:
+            break;
+    }
 }
 
-void DirectRenderer::scheduleRenderIfNecessary() {
-    if (mRenderPending || mOutputBuffers.empty()) {
+void DirectRenderer::AudioRenderer::schedulePushIfNecessary() {
+    if (mPushPending || mInputBuffers.empty()) {
         return;
     }
 
-    mRenderPending = true;
+    mPushPending = true;
+
+    uint32_t numFramesPlayed;
+    CHECK_EQ(mAudioTrack->getPosition(&numFramesPlayed),
+             (status_t)OK);
+
+    uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
+
+    // This is how long the audio sink will have data to
+    // play back.
+    const float msecsPerFrame = 1000.0f / mAudioTrack->getSampleRate();
+
+    int64_t delayUs =
+        msecsPerFrame * numFramesPendingPlayout * 1000ll;
 
-    int64_t timeUs = (*mOutputBuffers.begin()).mTimeUs;
+    // Let's give it more data after about half that time
+    // has elapsed.
+    (new AMessage(kWhatPushAudio, id()))->post(delayUs / 2);
+}
+
+void DirectRenderer::AudioRenderer::onPushAudio() {
+    mPushPending = false;
+
+    while (!mInputBuffers.empty()) {
+        const BufferInfo &info = *mInputBuffers.begin();
+
+        ssize_t n = writeNonBlocking(
+                info.mBuffer->data(), info.mBuffer->size());
+
+        if (n < (ssize_t)info.mBuffer->size()) {
+            CHECK_GE(n, 0);
+
+            info.mBuffer->setRange(
+                    info.mBuffer->offset() + n, info.mBuffer->size() - n);
+            break;
+        }
+
+        mDecoderContext->releaseOutputBuffer(info.mIndex);
+
+        mInputBuffers.erase(mInputBuffers.begin());
+    }
+
+    schedulePushIfNecessary();
+}
+
+ssize_t DirectRenderer::AudioRenderer::writeNonBlocking(
+        const uint8_t *data, size_t size) {
+    uint32_t numFramesPlayed;
+    status_t err = mAudioTrack->getPosition(&numFramesPlayed);
+    if (err != OK) {
+        return err;
+    }
+
+    ssize_t numFramesAvailableToWrite =
+        mAudioTrack->frameCount() - (mNumFramesWritten - numFramesPlayed);
+
+    size_t numBytesAvailableToWrite =
+        numFramesAvailableToWrite * mAudioTrack->frameSize();
+
+    if (size > numBytesAvailableToWrite) {
+        size = numBytesAvailableToWrite;
+    }
+
+    CHECK_EQ(mAudioTrack->write(data, size), (ssize_t)size);
+
+    size_t numFramesWritten = size / mAudioTrack->frameSize();
+    mNumFramesWritten += numFramesWritten;
+
+    return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::DirectRenderer(
+        const sp<IGraphicBufferProducer> &bufferProducer)
+    : mSurfaceTex(bufferProducer),
+      mVideoRenderPending(false),
+      mLatencySum(0ll),
+      mLatencyCount(0),
+      mNumFramesLate(0),
+      mNumFrames(0) {
+}
+
+DirectRenderer::~DirectRenderer() {
+}
+
+int64_t DirectRenderer::getAvgLatenessUs() {
+    if (mLatencyCount == 0) {
+        return 0ll;
+    }
+
+    int64_t avgLatencyUs = mLatencySum / mLatencyCount;
+
+    mLatencySum = 0ll;
+    mLatencyCount = 0;
+
+    if (mNumFrames > 0) {
+        ALOGI("%d / %d frames late", mNumFramesLate, mNumFrames);
+        mNumFramesLate = 0;
+        mNumFrames = 0;
+    }
+
+    return avgLatencyUs;
+}
+
+void DirectRenderer::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDecoderNotify:
+        {
+            onDecoderNotify(msg);
+            break;
+        }
+
+        case kWhatRenderVideo:
+        {
+            onRenderVideo();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
+    CHECK_LT(trackIndex, 2u);
+
+    CHECK(mDecoderContext[trackIndex] == NULL);
+
+    sp<AMessage> notify = new AMessage(kWhatDecoderNotify, id());
+    notify->setSize("trackIndex", trackIndex);
+
+    mDecoderContext[trackIndex] = new DecoderContext(notify);
+    looper()->registerHandler(mDecoderContext[trackIndex]);
+
+    CHECK_EQ((status_t)OK,
+             mDecoderContext[trackIndex]->init(
+                 format, trackIndex == 0 ? mSurfaceTex : NULL));
+
+    if (trackIndex == 1) {
+        // Audio
+        mAudioRenderer = new AudioRenderer(mDecoderContext[1]);
+        looper()->registerHandler(mAudioRenderer);
+    }
+}
+
+void DirectRenderer::queueAccessUnit(
+        size_t trackIndex, const sp<ABuffer> &accessUnit) {
+    CHECK_LT(trackIndex, 2u);
+
+    if (mDecoderContext[trackIndex] == NULL) {
+        CHECK_EQ(trackIndex, 0u);
+
+        sp<AMessage> format = new AMessage;
+        format->setString("mime", "video/avc");
+        format->setInt32("width", 640);
+        format->setInt32("height", 360);
+
+        setFormat(trackIndex, format);
+    }
+
+    mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
+}
+
+void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    int32_t what;
+    CHECK(msg->findInt32("what", &what));
+
+    switch (what) {
+        case DecoderContext::kWhatOutputBufferReady:
+        {
+            size_t index;
+            CHECK(msg->findSize("index", &index));
+
+            int64_t timeUs;
+            CHECK(msg->findInt64("timeUs", &timeUs));
+
+            sp<ABuffer> buffer;
+            CHECK(msg->findBuffer("buffer", &buffer));
+
+            queueOutputBuffer(trackIndex, index, timeUs, buffer);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void DirectRenderer::queueOutputBuffer(
+        size_t trackIndex,
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    if (trackIndex == 1) {
+        // Audio
+        mAudioRenderer->queueInputBuffer(index, timeUs, buffer);
+        return;
+    }
+
+    OutputInfo info;
+    info.mIndex = index;
+    info.mTimeUs = timeUs;
+    info.mBuffer = buffer;
+    mVideoOutputBuffers.push_back(info);
+
+    scheduleVideoRenderIfNecessary();
+}
+
+void DirectRenderer::scheduleVideoRenderIfNecessary() {
+    if (mVideoRenderPending || mVideoOutputBuffers.empty()) {
+        return;
+    }
+
+    mVideoRenderPending = true;
+
+    int64_t timeUs = (*mVideoOutputBuffers.begin()).mTimeUs;
     int64_t nowUs = ALooper::GetNowUs();
 
     int64_t delayUs = timeUs - nowUs;
 
-    (new AMessage(kWhatRender, id()))->post(delayUs);
+    (new AMessage(kWhatRenderVideo, id()))->post(delayUs);
 }
 
-void DirectRenderer::onRender() {
-    mRenderPending = false;
+void DirectRenderer::onRenderVideo() {
+    mVideoRenderPending = false;
 
     int64_t nowUs = ALooper::GetNowUs();
 
-    while (!mOutputBuffers.empty()) {
-        const OutputInfo &info = *mOutputBuffers.begin();
+    while (!mVideoOutputBuffers.empty()) {
+        const OutputInfo &info = *mVideoOutputBuffers.begin();
 
         if (info.mTimeUs > nowUs) {
             break;
@@ -301,25 +635,14 @@ void DirectRenderer::onRender() {
         mLatencySum += nowUs - info.mTimeUs;
         ++mLatencyCount;
 
-        status_t err = mVideoDecoder->renderOutputBufferAndRelease(info.mIndex);
+        status_t err =
+            mDecoderContext[0]->renderOutputBufferAndRelease(info.mIndex);
         CHECK_EQ(err, (status_t)OK);
 
-        mOutputBuffers.erase(mOutputBuffers.begin());
+        mVideoOutputBuffers.erase(mVideoOutputBuffers.begin());
     }
 
-    scheduleRenderIfNecessary();
-}
-
-void DirectRenderer::scheduleVideoDecoderNotification() {
-    if (mVideoDecoderNotificationPending) {
-        return;
-    }
-
-    sp<AMessage> notify =
-        new AMessage(kWhatVideoDecoderNotify, id());
-
-    mVideoDecoder->requestActivityNotification(notify);
-    mVideoDecoderNotificationPending = true;
+    scheduleVideoRenderIfNecessary();
 }
 
 }  // namespace android
index 44be8f8..92c176a 100644 (file)
 namespace android {
 
 struct ABuffer;
+struct AudioTrack;
 struct IGraphicBufferProducer;
 struct MediaCodec;
 
-// An experimental renderer that only supports video and decodes video data
-// as soon as it arrives using a MediaCodec instance, rendering it without
-// delay. Primarily meant to finetune packet loss discovery and minimize
-// latency.
+// Renders audio and video data queued by calls to "queueAccessUnit".
 struct DirectRenderer : public AHandler {
     DirectRenderer(const sp<IGraphicBufferProducer> &bufferProducer);
 
     void setFormat(size_t trackIndex, const sp<AMessage> &format);
     void queueAccessUnit(size_t trackIndex, const sp<ABuffer> &accessUnit);
 
-    void setTimeOffset(int64_t offset);
-
     int64_t getAvgLatenessUs();
 
 protected:
@@ -45,30 +41,28 @@ protected:
     virtual ~DirectRenderer();
 
 private:
+    struct DecoderContext;
+    struct AudioRenderer;
+
     enum {
-        kWhatVideoDecoderNotify,
-        kWhatRender,
+        kWhatDecoderNotify,
+        kWhatRenderVideo,
     };
 
     struct OutputInfo {
         size_t mIndex;
         int64_t mTimeUs;
+        sp<ABuffer> mBuffer;
     };
 
     sp<IGraphicBufferProducer> mSurfaceTex;
 
-    sp<ALooper> mVideoDecoderLooper;
-    sp<MediaCodec> mVideoDecoder;
-    Vector<sp<ABuffer> > mVideoDecoderInputBuffers;
-    List<size_t> mVideoDecoderInputBuffersAvailable;
-    bool mVideoDecoderNotificationPending;
-
-    List<sp<ABuffer> > mVideoAccessUnits;
+    sp<DecoderContext> mDecoderContext[2];
+    List<OutputInfo> mVideoOutputBuffers;
 
-    List<OutputInfo> mOutputBuffers;
-    bool mRenderPending;
+    bool mVideoRenderPending;
 
-    int64_t mTimeOffsetUs;
+    sp<AudioRenderer> mAudioRenderer;
 
     int64_t mLatencySum;
     size_t mLatencyCount;
@@ -76,14 +70,14 @@ private:
     int32_t mNumFramesLate;
     int32_t mNumFrames;
 
-    void onVideoDecoderNotify();
-    void onRender();
+    void onDecoderNotify(const sp<AMessage> &msg);
 
-    void queueVideoDecoderInputBuffers();
-    void scheduleVideoDecoderNotification();
-    void scheduleRenderIfNecessary();
+    void queueOutputBuffer(
+            size_t trackIndex,
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
 
-    void queueOutputBuffer(size_t index, int64_t timeUs);
+    void scheduleVideoRenderIfNecessary();
+    void onRenderVideo();
 
     DISALLOW_EVIL_CONSTRUCTORS(DirectRenderer);
 };
index d635c3a..62021c0 100644 (file)
@@ -337,12 +337,17 @@ void WifiDisplaySink::onMediaReceiverNotify(const sp<AMessage> &msg) {
                 ALOGI("Assuming %lld ms of latency.", latencyUs / 1000ll);
             }
 
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+            int64_t timeUs;
+            CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
             // We are the timesync _client_,
             // client time = server time - time offset.
-            mRenderer->setTimeOffset(-mTimeOffsetUs + mTargetLatencyUs);
+            timeUs += mTargetLatencyUs - mTimeOffsetUs;
 
-            sp<ABuffer> accessUnit;
-            CHECK(msg->findBuffer("accessUnit", &accessUnit));
+            accessUnit->meta()->setInt64("timeUs", timeUs);
 
             size_t trackIndex;
             CHECK(msg->findSize("trackIndex", &trackIndex));