Make sure to disconnect from a BufferQueue when its client dies.
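
For context, the change works by registering a binder death notification on the producer's token in connect(), tearing the connection down from binderDied(), and unlinking the recipient again in disconnect(). Below is a minimal, hypothetical sketch of that IBinder::DeathRecipient pattern in isolation; the DeathWatcher class and its watch()/unwatch() helpers are illustrative names invented for this example and are not part of the commit.

#include <binder/IBinder.h>
#include <utils/Errors.h>
#include <utils/StrongPointer.h>

using namespace android;

// Hypothetical illustration only -- not BufferQueue code. Instances must be
// managed by sp<>, since DeathRecipient derives from RefBase.
class DeathWatcher : public IBinder::DeathRecipient {
public:
    // Called on a binder thread when the process owning the watched token
    // dies. BufferQueue's override simply calls disconnect(mConnectedApi).
    virtual void binderDied(const wp<IBinder>& /*who*/) {
        // tear down whatever state was tied to the dead client here
    }

    // Register for the notification. linkToDeath() fails with
    // INVALID_OPERATION when the token is local to the calling process,
    // which is why connect() only stores the token when the call succeeds.
    status_t watch(const sp<IBinder>& token) {
        return token->linkToDeath(this);
    }

    // Remove the callback on an orderly disconnect. This may fail if the
    // notification has already fired; that failure can be ignored.
    void unwatch(const sp<IBinder>& token) {
        token->unlinkToDeath(this);
    }
};
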
android-x86/frameworks-native.git: libs/gui/BufferQueue.cpp
index b4c7231..50e3079 100644
 #include <EGL/eglext.h>
 
 #include <gui/BufferQueue.h>
+#include <gui/IConsumerListener.h>
 #include <gui/ISurfaceComposer.h>
 #include <private/gui/ComposerService.h>
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
+#include <utils/CallStack.h>
 
 // Macros for including the BufferQueue name in log messages
 #define ST_LOGV(x, ...) ALOGV("[%s] "x, mConsumerName.string(), ##__VA_ARGS__)
@@ -63,15 +65,15 @@ static const char* scalingModeName(int scalingMode) {
     }
 }
 
-BufferQueue::BufferQueue(bool allowSynchronousMode,
-        const sp<IGraphicBufferAlloc>& allocator) :
+BufferQueue::BufferQueue(const sp<IGraphicBufferAlloc>& allocator) :
     mDefaultWidth(1),
     mDefaultHeight(1),
     mMaxAcquiredBufferCount(1),
     mDefaultMaxBufferCount(2),
     mOverrideMaxBufferCount(0),
-    mSynchronousMode(false),
-    mAllowSynchronousMode(allowSynchronousMode),
+    mConsumerControlledByApp(false),
+    mDequeueBufferCannotBlock(false),
+    mUseAsyncBuffer(true),
     mConnectedApi(NO_CONNECTED_API),
     mAbandoned(false),
     mFrameCounter(0),
@@ -100,7 +102,8 @@ BufferQueue::~BufferQueue() {
 }
 
 status_t BufferQueue::setDefaultMaxBufferCountLocked(int count) {
-    if (count < 2 || count > NUM_BUFFER_SLOTS)
+    const int minBufferCount = mUseAsyncBuffer ? 2 : 1;
+    if (count < minBufferCount || count > NUM_BUFFER_SLOTS)
         return BAD_VALUE;
 
     mDefaultMaxBufferCount = count;
@@ -109,11 +112,6 @@ status_t BufferQueue::setDefaultMaxBufferCountLocked(int count) {
     return NO_ERROR;
 }
 
-bool BufferQueue::isSynchronousMode() const {
-    Mutex::Autolock lock(mMutex);
-    return mSynchronousMode;
-}
-
 void BufferQueue::setConsumerName(const String8& name) {
     Mutex::Autolock lock(mMutex);
     mConsumerName = name;
@@ -141,7 +139,7 @@ status_t BufferQueue::setTransformHint(uint32_t hint) {
 status_t BufferQueue::setBufferCount(int bufferCount) {
     ST_LOGV("setBufferCount: count=%d", bufferCount);
 
-    sp<ConsumerListener> listener;
+    sp<IConsumerListener> listener;
     {
         Mutex::Autolock lock(mMutex);
 
@@ -156,21 +154,21 @@ status_t BufferQueue::setBufferCount(int bufferCount) {
         }
 
         // Error out if the user has dequeued buffers
-        int maxBufferCount = getMaxBufferCountLocked();
-        for (int i=0 ; i<maxBufferCount; i++) {
+        for (int i=0 ; i<NUM_BUFFER_SLOTS; i++) {
             if (mSlots[i].mBufferState == BufferSlot::DEQUEUED) {
                 ST_LOGE("setBufferCount: client owns some buffers");
                 return -EINVAL;
             }
         }
 
-        const int minBufferSlots = getMinMaxBufferCountLocked();
         if (bufferCount == 0) {
             mOverrideMaxBufferCount = 0;
             mDequeueCondition.broadcast();
             return NO_ERROR;
         }
 
+        // fine to assume async is false here, since we're setting the buffer count
+        const int minBufferSlots = getMinMaxBufferCountLocked(false);
         if (bufferCount < minBufferSlots) {
             ST_LOGE("setBufferCount: requested buffer count (%d) is less than "
                     "minimum (%d)", bufferCount, minBufferSlots);
@@ -178,12 +176,10 @@ status_t BufferQueue::setBufferCount(int bufferCount) {
         }
 
         // here we're guaranteed that the client doesn't have dequeued buffers
-        // and will release all of its buffer references.
-        //
-        // XXX: Should this use drainQueueAndFreeBuffersLocked instead?
+        // and will release all of its buffer references.  We don't clear the
+        // queue, however, so currently queued buffers still get displayed.
         freeAllBuffersLocked();
         mOverrideMaxBufferCount = bufferCount;
-        mBufferHasBeenQueued = false;
         mDequeueCondition.broadcast();
         listener = mConsumerListener;
     } // scope for lock
@@ -217,11 +213,14 @@ int BufferQueue::query(int what, int* outValue)
         value = mDefaultBufferFormat;
         break;
     case NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS:
-        value = getMinUndequeuedBufferCountLocked();
+        value = getMinUndequeuedBufferCount(false);
         break;
     case NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND:
         value = (mQueue.size() >= 2);
         break;
+    case NATIVE_WINDOW_CONSUMER_USAGE_BITS:
+        value = mConsumerUsageBits;
+        break;
     default:
         return BAD_VALUE;
     }
@@ -237,15 +236,11 @@ status_t BufferQueue::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
         ST_LOGE("requestBuffer: BufferQueue has been abandoned!");
         return NO_INIT;
     }
-    int maxBufferCount = getMaxBufferCountLocked();
-    if (slot < 0 || maxBufferCount <= slot) {
+    if (slot < 0 || slot >= NUM_BUFFER_SLOTS) {
         ST_LOGE("requestBuffer: slot index out of range [0, %d]: %d",
-                maxBufferCount, slot);
+                NUM_BUFFER_SLOTS, slot);
         return BAD_VALUE;
     } else if (mSlots[slot].mBufferState != BufferSlot::DEQUEUED) {
-        // XXX: I vaguely recall there was some reason this can be valid, but
-        // for the life of me I can't recall under what circumstances that's
-        // the case.
         ST_LOGE("requestBuffer: slot %d is not owned by the client (state=%d)",
                 slot, mSlots[slot].mBufferState);
         return BAD_VALUE;
@@ -255,7 +250,7 @@ status_t BufferQueue::requestBuffer(int slot, sp<GraphicBuffer>* buf) {
     return NO_ERROR;
 }
 
-status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
+status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence, bool async,
         uint32_t w, uint32_t h, uint32_t format, uint32_t usage) {
     ATRACE_CALL();
     ST_LOGV("dequeueBuffer: w=%d h=%d fmt=%#x usage=%#x", w, h, format, usage);
@@ -279,7 +274,6 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
         usage |= mConsumerUsageBits;
 
         int found = -1;
-        int dequeuedCount = 0;
         bool tryAgain = true;
         while (tryAgain) {
             if (mAbandoned) {
@@ -287,7 +281,16 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
                 return NO_INIT;
             }
 
-            const int maxBufferCount = getMaxBufferCountLocked();
+            const int maxBufferCount = getMaxBufferCountLocked(async);
+            if (async && mOverrideMaxBufferCount) {
+                // FIXME: some drivers are manually setting the buffer-count (which they
+                // shouldn't), so we do this extra test here to handle that case.
+                // This is TEMPORARY, until we get this fixed.
+                if (mOverrideMaxBufferCount < maxBufferCount) {
+                    ST_LOGE("dequeueBuffer: async mode is invalid with buffercount override");
+                    return BAD_VALUE;
+                }
+            }
 
             // Free up any buffers that are in slots beyond the max buffer
             // count.
@@ -301,23 +304,28 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
 
             // look for a free buffer to give to the client
             found = INVALID_BUFFER_SLOT;
-            dequeuedCount = 0;
+            int dequeuedCount = 0;
+            int acquiredCount = 0;
             for (int i = 0; i < maxBufferCount; i++) {
                 const int state = mSlots[i].mBufferState;
-                if (state == BufferSlot::DEQUEUED) {
-                    dequeuedCount++;
-                }
-
-                if (state == BufferSlot::FREE) {
-                    /* We return the oldest of the free buffers to avoid
-                     * stalling the producer if possible.  This is because
-                     * the consumer may still have pending reads of the
-                     * buffers in flight.
-                     */
-                    if ((found < 0) ||
-                            mSlots[i].mFrameNumber < mSlots[found].mFrameNumber) {
-                        found = i;
-                    }
+                switch (state) {
+                    case BufferSlot::DEQUEUED:
+                        dequeuedCount++;
+                        break;
+                    case BufferSlot::ACQUIRED:
+                        acquiredCount++;
+                        break;
+                    case BufferSlot::FREE:
+                        /* We return the oldest of the free buffers to avoid
+                         * stalling the producer if possible.  This is because
+                         * the consumer may still have pending reads of the
+                         * buffers in flight.
+                         */
+                        if ((found < 0) ||
+                                mSlots[i].mFrameNumber < mSlots[found].mFrameNumber) {
+                            found = i;
+                        }
+                        break;
                 }
             }
 
@@ -336,7 +344,7 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
                 // make sure the client is not trying to dequeue more buffers
                 // than allowed.
                 const int newUndequeuedCount = maxBufferCount - (dequeuedCount+1);
-                const int minUndequeuedCount = getMinUndequeuedBufferCountLocked();
+                const int minUndequeuedCount = getMinUndequeuedBufferCount(async);
                 if (newUndequeuedCount < minUndequeuedCount) {
                     ST_LOGE("dequeueBuffer: min undequeued buffer count (%d) "
                             "exceeded (dequeued=%d undequeudCount=%d)",
@@ -350,6 +358,16 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
             // the max buffer count to change.
             tryAgain = found == INVALID_BUFFER_SLOT;
             if (tryAgain) {
+                // return an error if we're in "cannot block" mode (producer and consumer
+                // are controlled by the application) -- however, the consumer is allowed
+                // to briefly acquire an extra buffer (which could cause us to have to wait
+                // here), and that's okay because we know the wait will be brief (it happens
+                // if we dequeue a buffer while the consumer has acquired one but hasn't yet
+                // released the old one -- see e.g. GLConsumer::updateTexImage()).
+                if (mDequeueBufferCannotBlock && (acquiredCount <= mMaxAcquiredBufferCount)) {
+                    ST_LOGE("dequeueBuffer: would block! returning an error instead.");
+                    return WOULD_BLOCK;
+                }
                 mDequeueCondition.wait(mMutex);
             }
         }
@@ -392,6 +410,13 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
             returnFlags |= IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION;
         }
 
+
+        if (CC_UNLIKELY(mSlots[buf].mFence == NULL)) {
+            ST_LOGE("dequeueBuffer: about to return a NULL fence from mSlot. "
+                    "buf=%d, w=%d, h=%d, format=%d",
+                    buf, buffer->width, buffer->height, buffer->format);
+        }
+
         dpy = mSlots[buf].mEglDisplay;
         eglFence = mSlots[buf].mEglFence;
         *outFence = mSlots[buf].mFence;
@@ -402,11 +427,9 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
     if (returnFlags & IGraphicBufferProducer::BUFFER_NEEDS_REALLOCATION) {
         status_t error;
         sp<GraphicBuffer> graphicBuffer(
-                mGraphicBufferAlloc->createGraphicBuffer(
-                        w, h, format, usage, &error));
+                mGraphicBufferAlloc->createGraphicBuffer(w, h, format, usage, &error));
         if (graphicBuffer == 0) {
-            ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
-                    "failed");
+            ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer failed");
             return error;
         }
 
@@ -418,6 +441,7 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
                 return NO_INIT;
             }
 
+            mSlots[*outBuf].mFrameNumber = ~0;
             mSlots[*outBuf].mGraphicBuffer = graphicBuffer;
         }
     }
@@ -435,44 +459,13 @@ status_t BufferQueue::dequeueBuffer(int *outBuf, sp<Fence>* outFence,
         eglDestroySyncKHR(dpy, eglFence);
     }
 
-    ST_LOGV("dequeueBuffer: returning slot=%d buf=%p flags=%#x", *outBuf,
+    ST_LOGV("dequeueBuffer: returning slot=%d/%llu buf=%p flags=%#x", *outBuf,
+            mSlots[*outBuf].mFrameNumber,
             mSlots[*outBuf].mGraphicBuffer->handle, returnFlags);
 
     return returnFlags;
 }
 
-status_t BufferQueue::setSynchronousMode(bool enabled) {
-    ATRACE_CALL();
-    ST_LOGV("setSynchronousMode: enabled=%d", enabled);
-    Mutex::Autolock lock(mMutex);
-
-    if (mAbandoned) {
-        ST_LOGE("setSynchronousMode: BufferQueue has been abandoned!");
-        return NO_INIT;
-    }
-
-    status_t err = OK;
-    if (!mAllowSynchronousMode && enabled)
-        return err;
-
-    if (!enabled) {
-        // going to asynchronous mode, drain the queue
-        err = drainQueueLocked();
-        if (err != NO_ERROR)
-            return err;
-    }
-
-    if (mSynchronousMode != enabled) {
-        // - if we're going to asynchronous mode, the queue is guaranteed to be
-        // empty here
-        // - if the client set the number of buffers, we're guaranteed that
-        // we have at least 3 (because we don't allow less)
-        mSynchronousMode = enabled;
-        mDequeueCondition.broadcast();
-    }
-    return err;
-}
-
 status_t BufferQueue::queueBuffer(int buf,
         const QueueBufferInput& input, QueueBufferOutput* output) {
     ATRACE_CALL();
@@ -482,29 +475,49 @@ status_t BufferQueue::queueBuffer(int buf,
     uint32_t transform;
     int scalingMode;
     int64_t timestamp;
+    bool isAutoTimestamp;
+    bool async;
     sp<Fence> fence;
 
-    input.deflate(&timestamp, &crop, &scalingMode, &transform, &fence);
+    input.deflate(&timestamp, &isAutoTimestamp, &crop, &scalingMode, &transform,
+            &async, &fence);
 
     if (fence == NULL) {
         ST_LOGE("queueBuffer: fence is NULL");
         return BAD_VALUE;
     }
 
-    ST_LOGV("queueBuffer: slot=%d time=%#llx crop=[%d,%d,%d,%d] tr=%#x "
-            "scale=%s",
-            buf, timestamp, crop.left, crop.top, crop.right, crop.bottom,
-            transform, scalingModeName(scalingMode));
+    switch (scalingMode) {
+        case NATIVE_WINDOW_SCALING_MODE_FREEZE:
+        case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
+        case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
+        case NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP:
+            break;
+        default:
+            ST_LOGE("unknown scaling mode: %d", scalingMode);
+            return -EINVAL;
+    }
 
-    sp<ConsumerListener> listener;
+    sp<IConsumerListener> listener;
 
     { // scope for the lock
         Mutex::Autolock lock(mMutex);
+
         if (mAbandoned) {
             ST_LOGE("queueBuffer: BufferQueue has been abandoned!");
             return NO_INIT;
         }
-        int maxBufferCount = getMaxBufferCountLocked();
+
+        const int maxBufferCount = getMaxBufferCountLocked(async);
+        if (async && mOverrideMaxBufferCount) {
+            // FIXME: some drivers are manually setting the buffer-count (which they
+            // shouldn't), so we do this extra test here to handle that case.
+            // This is TEMPORARY, until we get this fixed.
+            if (mOverrideMaxBufferCount < maxBufferCount) {
+                ST_LOGE("queueBuffer: async mode is invalid with buffercount override");
+                return BAD_VALUE;
+            }
+        }
         if (buf < 0 || buf >= maxBufferCount) {
             ST_LOGE("queueBuffer: slot index out of range [0, %d]: %d",
                     maxBufferCount, buf);
@@ -519,6 +532,12 @@ status_t BufferQueue::queueBuffer(int buf,
             return -EINVAL;
         }
 
+        ST_LOGV("queueBuffer: slot=%d/%llu time=%#llx crop=[%d,%d,%d,%d] "
+                "tr=%#x scale=%s",
+                buf, mFrameCounter + 1, timestamp,
+                crop.left, crop.top, crop.right, crop.bottom,
+                transform, scalingModeName(scalingMode));
+
         const sp<GraphicBuffer>& graphicBuffer(mSlots[buf].mGraphicBuffer);
         Rect bufferRect(graphicBuffer->getWidth(), graphicBuffer->getHeight());
         Rect croppedCrop;
@@ -529,52 +548,49 @@ status_t BufferQueue::queueBuffer(int buf,
             return -EINVAL;
         }
 
-        if (mSynchronousMode) {
-            // In synchronous mode we queue all buffers in a FIFO.
-            mQueue.push_back(buf);
+        mSlots[buf].mFence = fence;
+        mSlots[buf].mBufferState = BufferSlot::QUEUED;
+        mFrameCounter++;
+        mSlots[buf].mFrameNumber = mFrameCounter;
+
+        BufferItem item;
+        item.mAcquireCalled = mSlots[buf].mAcquireCalled;
+        item.mGraphicBuffer = mSlots[buf].mGraphicBuffer;
+        item.mCrop = crop;
+        item.mTransform = transform;
+        item.mScalingMode = scalingMode;
+        item.mTimestamp = timestamp;
+        item.mIsAutoTimestamp = isAutoTimestamp;
+        item.mFrameNumber = mFrameCounter;
+        item.mBuf = buf;
+        item.mFence = fence;
+        item.mIsDroppable = mDequeueBufferCannotBlock || async;
 
-            // Synchronous mode always signals that an additional frame should
-            // be consumed.
+        if (mQueue.empty()) {
+            // when the queue is empty, we can ignore "mDequeueBufferCannotBlock", and
+            // simply queue this buffer.
+            mQueue.push_back(item);
             listener = mConsumerListener;
         } else {
-            // In asynchronous mode we only keep the most recent buffer.
-            if (mQueue.empty()) {
-                mQueue.push_back(buf);
-
-                // Asynchronous mode only signals that a frame should be
-                // consumed if no previous frame was pending. If a frame were
-                // pending then the consumer would have already been notified.
-                listener = mConsumerListener;
+            // when the queue is not empty, we need to look at the front buffer
+            // state and see if we need to replace it.
+            Fifo::iterator front(mQueue.begin());
+            if (front->mIsDroppable) {
+                // buffer slot currently queued is marked free if still tracked
+                if (stillTracking(front)) {
+                    mSlots[front->mBuf].mBufferState = BufferSlot::FREE;
+                    // reset the frame number of the freed buffer so that it is the first in
+                    // line to be dequeued again.
+                    mSlots[front->mBuf].mFrameNumber = 0;
+                }
+                // and we record the new buffer in the queued list
+                *front = item;
             } else {
-                Fifo::iterator front(mQueue.begin());
-                // buffer currently queued is freed
-                mSlots[*front].mBufferState = BufferSlot::FREE;
-                // and we record the new buffer index in the queued list
-                *front = buf;
+                mQueue.push_back(item);
+                listener = mConsumerListener;
             }
         }
 
-        mSlots[buf].mTimestamp = timestamp;
-        mSlots[buf].mCrop = crop;
-        mSlots[buf].mTransform = transform;
-        mSlots[buf].mFence = fence;
-
-        switch (scalingMode) {
-            case NATIVE_WINDOW_SCALING_MODE_FREEZE:
-            case NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW:
-            case NATIVE_WINDOW_SCALING_MODE_SCALE_CROP:
-                break;
-            default:
-                ST_LOGE("unknown scaling mode: %d (ignoring)", scalingMode);
-                scalingMode = mSlots[buf].mScalingMode;
-                break;
-        }
-
-        mSlots[buf].mBufferState = BufferSlot::QUEUED;
-        mSlots[buf].mScalingMode = scalingMode;
-        mFrameCounter++;
-        mSlots[buf].mFrameNumber = mFrameCounter;
-
         mBufferHasBeenQueued = true;
         mDequeueCondition.broadcast();
 
@@ -601,10 +617,9 @@ void BufferQueue::cancelBuffer(int buf, const sp<Fence>& fence) {
         return;
     }
 
-    int maxBufferCount = getMaxBufferCountLocked();
-    if (buf < 0 || buf >= maxBufferCount) {
+    if (buf < 0 || buf >= NUM_BUFFER_SLOTS) {
         ST_LOGE("cancelBuffer: slot index out of range [0, %d]: %d",
-                maxBufferCount, buf);
+                NUM_BUFFER_SLOTS, buf);
         return;
     } else if (mSlots[buf].mBufferState != BufferSlot::DEQUEUED) {
         ST_LOGE("cancelBuffer: slot %d is not owned by the client (state=%d)",
@@ -620,9 +635,12 @@ void BufferQueue::cancelBuffer(int buf, const sp<Fence>& fence) {
     mDequeueCondition.broadcast();
 }
 
-status_t BufferQueue::connect(int api, QueueBufferOutput* output) {
+
+status_t BufferQueue::connect(const sp<IBinder>& token,
+        int api, bool producerControlledByApp, QueueBufferOutput* output) {
     ATRACE_CALL();
-    ST_LOGV("connect: api=%d", api);
+    ST_LOGV("connect: api=%d producerControlledByApp=%s", api,
+            producerControlledByApp ? "true" : "false");
     Mutex::Autolock lock(mMutex);
 
     if (mAbandoned) {
@@ -647,8 +665,14 @@ status_t BufferQueue::connect(int api, QueueBufferOutput* output) {
                 err = -EINVAL;
             } else {
                 mConnectedApi = api;
-                output->inflate(mDefaultWidth, mDefaultHeight, mTransformHint,
-                        mQueue.size());
+                output->inflate(mDefaultWidth, mDefaultHeight, mTransformHint, mQueue.size());
+
+                // set up a death notification so that we can disconnect automatically
+                // when/if the remote producer dies.
+                // This will fail with INVALID_OPERATION if the "token" is local to our process.
+                if (token->linkToDeath(static_cast<IBinder::DeathRecipient*>(this)) == NO_ERROR) {
+                    mConnectedProducerToken = token;
+                }
             }
             break;
         default:
@@ -657,16 +681,27 @@ status_t BufferQueue::connect(int api, QueueBufferOutput* output) {
     }
 
     mBufferHasBeenQueued = false;
+    mDequeueBufferCannotBlock = mConsumerControlledByApp && producerControlledByApp;
 
     return err;
 }
 
+void BufferQueue::binderDied(const wp<IBinder>& who) {
+    // If we're here, it means that a producer we were connected to died.
+    // We're GUARANTEED that we're still connected to it, because it has no other way
+    // to get disconnected -- otherwise we wouldn't be here, since we remove this
+    // callback upon disconnect. Therefore, it's okay to read mConnectedApi without
+    // synchronization here.
+    int api = mConnectedApi;
+    this->disconnect(api);
+}
+
 status_t BufferQueue::disconnect(int api) {
     ATRACE_CALL();
     ST_LOGV("disconnect: api=%d", api);
 
     int err = NO_ERROR;
-    sp<ConsumerListener> listener;
+    sp<IConsumerListener> listener;
 
     { // Scope for the lock
         Mutex::Autolock lock(mMutex);
@@ -683,7 +718,15 @@ status_t BufferQueue::disconnect(int api) {
             case NATIVE_WINDOW_API_MEDIA:
             case NATIVE_WINDOW_API_CAMERA:
                 if (mConnectedApi == api) {
-                    drainQueueAndFreeBuffersLocked();
+                    freeAllBuffersLocked();
+                    // remove our death notification callback if we have one
+                    sp<IBinder> token = mConnectedProducerToken;
+                    if (token != NULL) {
+                        // this can fail if we're here because of the death notification;
+                        // either way, we just ignore the error.
+                        token->unlinkToDeath(static_cast<IBinder::DeathRecipient*>(this));
+                    }
+                    mConnectedProducerToken = NULL;
                     mConnectedApi = NO_CONNECTED_API;
                     mDequeueCondition.broadcast();
                     listener = mConsumerListener;
@@ -707,36 +750,31 @@ status_t BufferQueue::disconnect(int api) {
     return err;
 }
 
-void BufferQueue::dump(String8& result) const
-{
-    char buffer[1024];
-    BufferQueue::dump(result, "", buffer, 1024);
-}
-
-void BufferQueue::dump(String8& result, const char* prefix,
-        char* buffer, size_t SIZE) const
-{
+void BufferQueue::dump(String8& result, const char* prefix) const {
     Mutex::Autolock _l(mMutex);
 
     String8 fifo;
     int fifoSize = 0;
     Fifo::const_iterator i(mQueue.begin());
     while (i != mQueue.end()) {
-       snprintf(buffer, SIZE, "%02d ", *i++);
-       fifoSize++;
-       fifo.append(buffer);
+        fifo.appendFormat("%02d:%p crop=[%d,%d,%d,%d], "
+                "xform=0x%02x, time=%#llx, scale=%s\n",
+                i->mBuf, i->mGraphicBuffer.get(),
+                i->mCrop.left, i->mCrop.top, i->mCrop.right,
+                i->mCrop.bottom, i->mTransform, i->mTimestamp,
+                scalingModeName(i->mScalingMode)
+                );
+        i++;
+        fifoSize++;
     }
 
-    int maxBufferCount = getMaxBufferCountLocked();
 
-    snprintf(buffer, SIZE,
-            "%s-BufferQueue maxBufferCount=%d, mSynchronousMode=%d, default-size=[%dx%d], "
+    result.appendFormat(
+            "%s-BufferQueue mMaxAcquiredBufferCount=%d, mDequeueBufferCannotBlock=%d, default-size=[%dx%d], "
             "default-format=%d, transform-hint=%02x, FIFO(%d)={%s}\n",
-            prefix, maxBufferCount, mSynchronousMode, mDefaultWidth,
+            prefix, mMaxAcquiredBufferCount, mDequeueBufferCannotBlock, mDefaultWidth,
             mDefaultHeight, mDefaultBufferFormat, mTransformHint,
             fifoSize, fifo.string());
-    result.append(buffer);
-
 
     struct {
         const char * operator()(int state) const {
@@ -750,27 +788,30 @@ void BufferQueue::dump(String8& result, const char* prefix,
         }
     } stateName;
 
+    // just trim the free buffers to not spam the dump
+    int maxBufferCount = 0;
+    for (int i=NUM_BUFFER_SLOTS-1 ; i>=0 ; i--) {
+        const BufferSlot& slot(mSlots[i]);
+        if ((slot.mBufferState != BufferSlot::FREE) || (slot.mGraphicBuffer != NULL)) {
+            maxBufferCount = i+1;
+            break;
+        }
+    }
+
     for (int i=0 ; i<maxBufferCount ; i++) {
         const BufferSlot& slot(mSlots[i]);
-        snprintf(buffer, SIZE,
-                "%s%s[%02d] "
-                "state=%-8s, crop=[%d,%d,%d,%d], "
-                "xform=0x%02x, time=%#llx, scale=%s",
-                prefix, (slot.mBufferState == BufferSlot::ACQUIRED)?">":" ", i,
-                stateName(slot.mBufferState),
-                slot.mCrop.left, slot.mCrop.top, slot.mCrop.right,
-                slot.mCrop.bottom, slot.mTransform, slot.mTimestamp,
-                scalingModeName(slot.mScalingMode)
+        const sp<GraphicBuffer>& buf(slot.mGraphicBuffer);
+        result.appendFormat(
+            "%s%s[%02d:%p] state=%-8s",
+                prefix, (slot.mBufferState == BufferSlot::ACQUIRED)?">":" ", i, buf.get(),
+                stateName(slot.mBufferState)
         );
-        result.append(buffer);
 
-        const sp<GraphicBuffer>& buf(slot.mGraphicBuffer);
         if (buf != NULL) {
-            snprintf(buffer, SIZE,
+            result.appendFormat(
                     ", %p [%4ux%4u:%4u,%3X]",
                     buf->handle, buf->width, buf->height, buf->stride,
                     buf->format);
-            result.append(buffer);
         }
         result.append("\n");
     }
@@ -795,16 +836,13 @@ void BufferQueue::freeBufferLocked(int slot) {
 }
 
 void BufferQueue::freeAllBuffersLocked() {
-    ALOGW_IF(!mQueue.isEmpty(),
-            "freeAllBuffersLocked called but mQueue is not empty");
-    mQueue.clear();
     mBufferHasBeenQueued = false;
     for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
         freeBufferLocked(i);
     }
 }
 
-status_t BufferQueue::acquireBuffer(BufferItem *buffer) {
+status_t BufferQueue::acquireBuffer(BufferItem *buffer, nsecs_t expectedPresent) {
     ATRACE_CALL();
     Mutex::Autolock _l(mMutex);
 
@@ -827,58 +865,153 @@ status_t BufferQueue::acquireBuffer(BufferItem *buffer) {
     // check if queue is empty
     // In asynchronous mode the list is guaranteed to be one buffer
     // deep, while in synchronous mode we use the oldest buffer.
-    if (!mQueue.empty()) {
-        Fifo::iterator front(mQueue.begin());
-        int buf = *front;
+    if (mQueue.empty()) {
+        return NO_BUFFER_AVAILABLE;
+    }
 
-        ATRACE_BUFFER_INDEX(buf);
+    Fifo::iterator front(mQueue.begin());
 
-        if (mSlots[buf].mAcquireCalled) {
-            buffer->mGraphicBuffer = NULL;
-        } else {
-            buffer->mGraphicBuffer = mSlots[buf].mGraphicBuffer;
+    // If expectedPresent is specified, we may not want to return a buffer yet.
+    // If it's specified and there's more than one buffer queued, we may
+    // want to drop a buffer.
+    if (expectedPresent != 0) {
+        const int MAX_REASONABLE_NSEC = 1000000000ULL;  // 1 second
+
+        // The "expectedPresent" argument indicates when the buffer is expected
+        // to be presented on-screen.  If the buffer's desired-present time
+        // is earlier (less) than expectedPresent, meaning it'll be displayed
+        // on time or possibly late if we show it ASAP, we acquire and return
+        // it.  If we don't want to display it until after the expectedPresent
+        // time, we return PRESENT_LATER without acquiring it.
+        //
+        // To be safe, we don't defer acquisition if expectedPresent is
+        // more than one second in the future beyond the desired present time
+        // (i.e. we'd be holding the buffer for a long time).
+        //
+        // NOTE: code assumes monotonic time values from the system clock are
+        // positive.
+
+        // Start by checking to see if we can drop frames.  We skip this check
+        // if the timestamps are being auto-generated by Surface -- if the
+        // app isn't generating timestamps explicitly, they probably don't
+        // want frames to be discarded based on them.
+        while (mQueue.size() > 1 && !mQueue[0].mIsAutoTimestamp) {
+            // If entry[1] is timely, drop entry[0] (and repeat).  We apply
+            // an additional criteria here: we only drop the earlier buffer if
+            // our desiredPresent falls within +/- 1 second of the expected
+            // present.  Otherwise, bogus desiredPresent times (e.g. 0 or
+            // a small relative timestamp), which normally mean "ignore the
+            // timestamp and acquire immediately", would cause us to drop
+            // frames.
+            //
+            // We may want to add an additional criteria: don't drop the
+            // earlier buffer if entry[1]'s fence hasn't signaled yet.
+            //
+            // (Vector front is [0], back is [size()-1])
+            const BufferItem& bi(mQueue[1]);
+            nsecs_t desiredPresent = bi.mTimestamp;
+            if (desiredPresent < expectedPresent - MAX_REASONABLE_NSEC ||
+                    desiredPresent > expectedPresent) {
+                // This buffer is set to display in the near future, or
+                // desiredPresent is garbage.  Either way we don't want to
+                // drop the previous buffer just to get this on screen sooner.
+                ST_LOGV("pts nodrop: des=%lld expect=%lld (%lld) now=%lld",
+                        desiredPresent, expectedPresent, desiredPresent - expectedPresent,
+                        systemTime(CLOCK_MONOTONIC));
+                break;
+            }
+            ST_LOGV("pts drop: queue1des=%lld expect=%lld size=%d",
+                    desiredPresent, expectedPresent, mQueue.size());
+            if (stillTracking(front)) {
+                // front buffer is still in mSlots, so mark the slot as free
+                mSlots[front->mBuf].mBufferState = BufferSlot::FREE;
+            }
+            mQueue.erase(front);
+            front = mQueue.begin();
+        }
+
+        // See if the front buffer is due.
+        nsecs_t desiredPresent = front->mTimestamp;
+        if (desiredPresent > expectedPresent &&
+                desiredPresent < expectedPresent + MAX_REASONABLE_NSEC) {
+            ST_LOGV("pts defer: des=%lld expect=%lld (%lld) now=%lld",
+                    desiredPresent, expectedPresent, desiredPresent - expectedPresent,
+                    systemTime(CLOCK_MONOTONIC));
+            return PRESENT_LATER;
         }
-        buffer->mCrop = mSlots[buf].mCrop;
-        buffer->mTransform = mSlots[buf].mTransform;
-        buffer->mScalingMode = mSlots[buf].mScalingMode;
-        buffer->mFrameNumber = mSlots[buf].mFrameNumber;
-        buffer->mTimestamp = mSlots[buf].mTimestamp;
-        buffer->mBuf = buf;
-        buffer->mFence = mSlots[buf].mFence;
 
+        ST_LOGV("pts accept: des=%lld expect=%lld (%lld) now=%lld",
+                desiredPresent, expectedPresent, desiredPresent - expectedPresent,
+                systemTime(CLOCK_MONOTONIC));
+    }
+
+    int buf = front->mBuf;
+    *buffer = *front;
+    ATRACE_BUFFER_INDEX(buf);
+
+    ST_LOGV("acquireBuffer: acquiring { slot=%d/%llu, buffer=%p }",
+            front->mBuf, front->mFrameNumber,
+            front->mGraphicBuffer->handle);
+    // if front buffer still being tracked update slot state
+    if (stillTracking(front)) {
         mSlots[buf].mAcquireCalled = true;
         mSlots[buf].mNeedsCleanupOnRelease = false;
         mSlots[buf].mBufferState = BufferSlot::ACQUIRED;
         mSlots[buf].mFence = Fence::NO_FENCE;
+    }
 
-        mQueue.erase(front);
-        mDequeueCondition.broadcast();
-
-        ATRACE_INT(mConsumerName.string(), mQueue.size());
-    } else {
-        return NO_BUFFER_AVAILABLE;
+    // If the buffer has previously been acquired by the consumer, set
+    // mGraphicBuffer to NULL to avoid unnecessarily remapping this
+    // buffer on the consumer side.
+    if (buffer->mAcquireCalled) {
+        buffer->mGraphicBuffer = NULL;
     }
 
+    mQueue.erase(front);
+    mDequeueCondition.broadcast();
+
+    ATRACE_INT(mConsumerName.string(), mQueue.size());
+
     return NO_ERROR;
 }
 
-status_t BufferQueue::releaseBuffer(int buf, EGLDisplay display,
+status_t BufferQueue::releaseBuffer(
+        int buf, uint64_t frameNumber, EGLDisplay display,
         EGLSyncKHR eglFence, const sp<Fence>& fence) {
     ATRACE_CALL();
     ATRACE_BUFFER_INDEX(buf);
 
-    Mutex::Autolock _l(mMutex);
-
     if (buf == INVALID_BUFFER_SLOT || fence == NULL) {
         return BAD_VALUE;
     }
 
-    mSlots[buf].mEglDisplay = display;
-    mSlots[buf].mEglFence = eglFence;
-    mSlots[buf].mFence = fence;
+    Mutex::Autolock _l(mMutex);
+
+    // If the frame number has changed because the buffer has been reallocated,
+    // we can ignore this releaseBuffer for the old buffer.
+    if (frameNumber != mSlots[buf].mFrameNumber) {
+        return STALE_BUFFER_SLOT;
+    }
+
+
+    // Internal state consistency checks:
+    // Make sure this buffer hasn't been queued while we owned it (acquired)
+    Fifo::iterator front(mQueue.begin());
+    Fifo::const_iterator const end(mQueue.end());
+    while (front != end) {
+        if (front->mBuf == buf) {
+            LOG_ALWAYS_FATAL("[%s] received new buffer(#%lld) on slot #%d that has not yet been "
+                    "acquired", mConsumerName.string(), frameNumber, buf);
+            break; // never reached
+        }
+        front++;
+    }
 
     // The buffer can now only be released if its in the acquired state
     if (mSlots[buf].mBufferState == BufferSlot::ACQUIRED) {
+        mSlots[buf].mEglDisplay = display;
+        mSlots[buf].mEglFence = eglFence;
+        mSlots[buf].mFence = fence;
         mSlots[buf].mBufferState = BufferSlot::FREE;
     } else if (mSlots[buf].mNeedsCleanupOnRelease) {
         ST_LOGV("releasing a stale buf %d its state was %d", buf, mSlots[buf].mBufferState);
@@ -893,8 +1026,10 @@ status_t BufferQueue::releaseBuffer(int buf, EGLDisplay display,
     return NO_ERROR;
 }
 
-status_t BufferQueue::consumerConnect(const sp<ConsumerListener>& consumerListener) {
-    ST_LOGV("consumerConnect");
+status_t BufferQueue::consumerConnect(const sp<IConsumerListener>& consumerListener,
+        bool controlledByApp) {
+    ST_LOGV("consumerConnect controlledByApp=%s",
+            controlledByApp ? "true" : "false");
     Mutex::Autolock lock(mMutex);
 
     if (mAbandoned) {
@@ -907,6 +1042,7 @@ status_t BufferQueue::consumerConnect(const sp<ConsumerListener>& consumerListen
     }
 
     mConsumerListener = consumerListener;
+    mConsumerControlledByApp = controlledByApp;
 
     return NO_ERROR;
 }
@@ -943,14 +1079,24 @@ status_t BufferQueue::getReleasedBuffers(uint32_t* slotMask) {
             mask |= 1 << i;
         }
     }
+
+    // Remove buffers in flight (on the queue) from the mask where acquire has
+    // been called: the consumer will not receive the buffer address, so
+    // it should not free these slots.
+    Fifo::iterator front(mQueue.begin());
+    while (front != mQueue.end()) {
+        if (front->mAcquireCalled)
+            mask &= ~(1 << front->mBuf);
+        front++;
+    }
+
     *slotMask = mask;
 
     ST_LOGV("getReleasedBuffers: returning mask %#x", mask);
     return NO_ERROR;
 }
 
-status_t BufferQueue::setDefaultBufferSize(uint32_t w, uint32_t h)
-{
+status_t BufferQueue::setDefaultBufferSize(uint32_t w, uint32_t h) {
     ST_LOGV("setDefaultBufferSize: w=%d, h=%d", w, h);
     if (!w || !h) {
         ST_LOGE("setDefaultBufferSize: dimensions cannot be 0 (w=%d, h=%d)",
@@ -970,6 +1116,17 @@ status_t BufferQueue::setDefaultMaxBufferCount(int bufferCount) {
     return setDefaultMaxBufferCountLocked(bufferCount);
 }
 
+status_t BufferQueue::disableAsyncBuffer() {
+    ATRACE_CALL();
+    Mutex::Autolock lock(mMutex);
+    if (mConsumerListener != NULL) {
+        ST_LOGE("disableAsyncBuffer: consumer already connected!");
+        return INVALID_OPERATION;
+    }
+    mUseAsyncBuffer = false;
+    return NO_ERROR;
+}
+
 status_t BufferQueue::setMaxAcquiredBufferCount(int maxAcquiredBuffers) {
     ATRACE_CALL();
     Mutex::Autolock lock(mMutex);
@@ -985,58 +1142,26 @@ status_t BufferQueue::setMaxAcquiredBufferCount(int maxAcquiredBuffers) {
     return NO_ERROR;
 }
 
-void BufferQueue::freeAllBuffersExceptHeadLocked() {
-    int head = -1;
-    if (!mQueue.empty()) {
-        Fifo::iterator front(mQueue.begin());
-        head = *front;
-    }
-    mBufferHasBeenQueued = false;
-    for (int i = 0; i < NUM_BUFFER_SLOTS; i++) {
-        if (i != head) {
-            freeBufferLocked(i);
-        }
-    }
-}
-
-status_t BufferQueue::drainQueueLocked() {
-    while (mSynchronousMode && mQueue.size() > 1) {
-        mDequeueCondition.wait(mMutex);
-        if (mAbandoned) {
-            ST_LOGE("drainQueueLocked: BufferQueue has been abandoned!");
-            return NO_INIT;
-        }
-        if (mConnectedApi == NO_CONNECTED_API) {
-            ST_LOGE("drainQueueLocked: BufferQueue is not connected!");
-            return NO_INIT;
-        }
-    }
-    return NO_ERROR;
-}
+int BufferQueue::getMinUndequeuedBufferCount(bool async) const {
+    // if dequeueBuffer is allowed to error out, we don't have to
+    // add an extra buffer.
+    if (!mUseAsyncBuffer)
+        return mMaxAcquiredBufferCount;
 
-status_t BufferQueue::drainQueueAndFreeBuffersLocked() {
-    status_t err = drainQueueLocked();
-    if (err == NO_ERROR) {
-        if (mQueue.empty()) {
-            freeAllBuffersLocked();
-        } else {
-            freeAllBuffersExceptHeadLocked();
-        }
-    }
-    return err;
-}
+    // if we're in async mode, or we want to prevent the app from
+    // deadlocking itself, we throw in an extra buffer to guarantee it.
+    if (mDequeueBufferCannotBlock || async)
+        return mMaxAcquiredBufferCount+1;
 
-int BufferQueue::getMinMaxBufferCountLocked() const {
-    return getMinUndequeuedBufferCountLocked() + 1;
+    return mMaxAcquiredBufferCount;
 }
 
-int BufferQueue::getMinUndequeuedBufferCountLocked() const {
-    return mSynchronousMode ? mMaxAcquiredBufferCount :
-            mMaxAcquiredBufferCount + 1;
+int BufferQueue::getMinMaxBufferCountLocked(bool async) const {
+    return getMinUndequeuedBufferCount(async) + 1;
 }
 
-int BufferQueue::getMaxBufferCountLocked() const {
-    int minMaxBufferCount = getMinMaxBufferCountLocked();
+int BufferQueue::getMaxBufferCountLocked(bool async) const {
+    int minMaxBufferCount = getMinMaxBufferCountLocked(async);
 
     int maxBufferCount = mDefaultMaxBufferCount;
     if (maxBufferCount < minMaxBufferCount) {
@@ -1061,21 +1186,37 @@ int BufferQueue::getMaxBufferCountLocked() const {
     return maxBufferCount;
 }
 
+bool BufferQueue::stillTracking(const BufferItem *item) const {
+    const BufferSlot &slot = mSlots[item->mBuf];
+
+    ST_LOGV("stillTracking?: item: { slot=%d/%llu, buffer=%p }, "
+            "slot: { slot=%d/%llu, buffer=%p }",
+            item->mBuf, item->mFrameNumber,
+            (item->mGraphicBuffer.get() ? item->mGraphicBuffer->handle : 0),
+            item->mBuf, slot.mFrameNumber,
+            (slot.mGraphicBuffer.get() ? slot.mGraphicBuffer->handle : 0));
+
+    // Compare item with its original buffer slot.  We can check the slot
+    // as the buffer would not be moved to a different slot by the producer.
+    return (slot.mGraphicBuffer != NULL &&
+            item->mGraphicBuffer->handle == slot.mGraphicBuffer->handle);
+}
+
 BufferQueue::ProxyConsumerListener::ProxyConsumerListener(
-        const wp<BufferQueue::ConsumerListener>& consumerListener):
+        const wp<ConsumerListener>& consumerListener):
         mConsumerListener(consumerListener) {}
 
 BufferQueue::ProxyConsumerListener::~ProxyConsumerListener() {}
 
 void BufferQueue::ProxyConsumerListener::onFrameAvailable() {
-    sp<BufferQueue::ConsumerListener> listener(mConsumerListener.promote());
+    sp<ConsumerListener> listener(mConsumerListener.promote());
     if (listener != NULL) {
         listener->onFrameAvailable();
     }
 }
 
 void BufferQueue::ProxyConsumerListener::onBuffersReleased() {
-    sp<BufferQueue::ConsumerListener> listener(mConsumerListener.promote());
+    sp<ConsumerListener> listener(mConsumerListener.promote());
     if (listener != NULL) {
         listener->onBuffersReleased();
     }
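
As a side note on the timestamp policy added to acquireBuffer() in the hunks above: the drop/defer decision can be read independently of the slot bookkeeping. The following is a hypothetical, self-contained sketch of just that policy; the Item struct, Action enum, and decide() function are invented for illustration and are not part of the commit, and the caller is assumed to have already checked that the queue is non-empty.

#include <cstdint>
#include <vector>

enum class Action { Acquire, PresentLater };

struct Item {
    int64_t desiredPresentNs;   // producer-supplied timestamp
    bool isAutoTimestamp;       // true if Surface generated it automatically
};

// expectedPresentNs == 0 means "no expectation": acquire immediately.
Action decide(std::vector<Item>& queue, int64_t expectedPresentNs) {
    const int64_t MAX_REASONABLE_NSEC = 1000000000LL;  // 1 second
    if (expectedPresentNs == 0) {
        return Action::Acquire;
    }

    // Drop the front entry while the next one would still be shown on time,
    // but only when its desired-present time falls in the window
    // (expectedPresent - 1s, expectedPresent]; bogus timestamps outside that
    // window mean "show ASAP" and must not cause drops.
    while (queue.size() > 1 && !queue[0].isAutoTimestamp) {
        const int64_t desired = queue[1].desiredPresentNs;
        if (desired < expectedPresentNs - MAX_REASONABLE_NSEC ||
                desired > expectedPresentNs) {
            break;  // the next buffer is for the future (or garbage): keep front
        }
        queue.erase(queue.begin());  // drop the now-stale front buffer
    }

    // Defer if the (possibly new) front buffer isn't due yet, unless its
    // timestamp is more than a second ahead, which is treated as bogus.
    const int64_t desired = queue.front().desiredPresentNs;
    if (desired > expectedPresentNs &&
            desired < expectedPresentNs + MAX_REASONABLE_NSEC) {
        return Action::PresentLater;
    }
    return Action::Acquire;
}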