From 97350f9df7252c881f011a410fcd9e6d766d2bee Mon Sep 17 00:00:00 2001
From: Phil Burk
Date: Fri, 21 Jul 2017 15:59:44 -0700
Subject: [PATCH] aaudio: improve accuracy of timestamps

Account for latency added by the AAudio service.
Fix input timestamps.

Bug: 37080396
Test: test_timestamps.cpp input_monitor.cpp
Change-Id: I1053cd21af722bb9b9371df4e5731bf4a0a57b0b
---
 .../examples/input_monitor/src/input_monitor.cpp   |  39 ++++-
 .../libaaudio/examples/utils/AAudioExampleUtils.h  |  24 ++-
 media/libaaudio/src/binding/AAudioServiceMessage.h |   4 +-
 media/libaaudio/src/client/AudioStreamInternal.cpp |  32 +++-
 media/libaaudio/src/client/AudioStreamInternal.h   |   9 +-
 media/libaaudio/src/utility/AAudioUtilities.h      |  70 ++++++++
 media/libaaudio/tests/Android.mk                   |  11 ++
 media/libaaudio/tests/test_timestamps.cpp          | 188 +++++++++++++++++++++
 services/oboeservice/AAudioServiceEndpoint.cpp     |   5 +-
 services/oboeservice/AAudioServiceEndpoint.h       |   2 +
 .../oboeservice/AAudioServiceEndpointCapture.cpp   |  27 ++-
 services/oboeservice/AAudioServiceEndpointPlay.cpp |  24 ++-
 services/oboeservice/AAudioServiceStreamBase.cpp   |  23 ++-
 services/oboeservice/AAudioServiceStreamBase.h     |   7 +-
 services/oboeservice/AAudioServiceStreamMMAP.cpp   |  31 +++-
 services/oboeservice/AAudioServiceStreamMMAP.h     |   3 +
 services/oboeservice/AAudioServiceStreamShared.cpp |  30 +++-
 services/oboeservice/AAudioServiceStreamShared.h   |  12 +-
 18 files changed, 494 insertions(+), 47 deletions(-)
 create mode 100644 media/libaaudio/tests/test_timestamps.cpp

diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
index edf644adf4..2dfd0a7e76 100644
--- a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -27,9 +27,11 @@
 #include "AAudioSimpleRecorder.h"
 
 // TODO support FLOAT
-#define REQUIRED_FORMAT AAUDIO_FORMAT_PCM_I16
+#define REQUIRED_FORMAT  AAUDIO_FORMAT_PCM_I16
 #define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
 
+static const int FRAMES_PER_LINE = 20000;
+
 int main(int argc, const char **argv)
 {
     AAudioArgsParser argParser;
@@ -46,7 +48,10 @@ int main(int argc, const char **argv)
     int32_t framesPerRead = 0;
     int32_t framesToRecord = 0;
     int32_t framesLeft = 0;
+    int32_t nextFrameCount = 0;
+    int32_t frameCount = 0;
     int32_t xRunCount = 0;
+    int64_t previousFramePosition = -1;
     int16_t *data = nullptr;
     float peakLevel = 0.0;
     int loopCounter = 0;
@@ -56,7 +61,7 @@ int main(int argc, const char **argv)
     // in a buffer if we hang or crash.
     setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
 
-    printf("%s - Monitor input level using AAudio\n", argv[0]);
+    printf("%s - Monitor input level using AAudio V0.1.1\n", argv[0]);
 
     argParser.setFormat(REQUIRED_FORMAT);
     if (argParser.parseArgs(argc, argv)) {
@@ -133,6 +138,7 @@ int main(int argc, const char **argv)
             goto finish;
         }
         framesLeft -= actual;
+        frameCount += actual;
 
         // Peak finder.
         for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
@@ -143,9 +149,36 @@ int main(int argc, const char **argv)
         }
 
         // Display level as stars, eg. "******".
-        if ((loopCounter++ % 10) == 0) {
+        if (frameCount > nextFrameCount) {
            displayPeakLevel(peakLevel);
            peakLevel = 0.0;
+           nextFrameCount += FRAMES_PER_LINE;
+        }
+
+        // Print timestamps.
+        int64_t framePosition = 0;
+        int64_t frameTime = 0;
+        aaudio_result_t timeResult;
+        timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+                                               &framePosition, &frameTime);
+
+        if (timeResult == AAUDIO_OK) {
+            if (framePosition > (previousFramePosition + FRAMES_PER_LINE)) {
+                int64_t realTime = getNanoseconds();
+                int64_t framesRead = AAudioStream_getFramesRead(aaudioStream);
+
+                double latencyMillis = calculateLatencyMillis(framesRead, realTime,
+                                                              framePosition, frameTime,
+                                                              actualSampleRate);
+
+                printf("--- timestamp: result = %4d, position = %lld, at %lld nanos"
+                       ", latency = %7.2f msec\n",
+                       timeResult,
+                       (long long) framePosition,
+                       (long long) frameTime,
+                       latencyMillis);
+                previousFramePosition = framePosition;
+            }
        }
    }

diff --git a/media/libaaudio/examples/utils/AAudioExampleUtils.h b/media/libaaudio/examples/utils/AAudioExampleUtils.h
index 66de25f79a..6cbcc58c67 100644
--- a/media/libaaudio/examples/utils/AAudioExampleUtils.h
+++ b/media/libaaudio/examples/utils/AAudioExampleUtils.h
@@ -25,7 +25,7 @@
 #define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
 #define NANOS_PER_SECOND      (NANOS_PER_MILLISECOND * 1000)
 
-static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+const char *getSharingModeText(aaudio_sharing_mode_t mode) {
     const char *modeText = "unknown";
     switch (mode) {
     case AAUDIO_SHARING_MODE_EXCLUSIVE:
@@ -49,7 +49,7 @@ static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC)
     return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
 }
 
-void displayPeakLevel(float peakLevel) {
+static void displayPeakLevel(float peakLevel) {
     printf("%5.3f ", peakLevel);
     const int maxStars = 50; // arbitrary, fits on one line
     int numStars = (int) (peakLevel * maxStars);
@@ -59,4 +59,24 @@ void displayPeakLevel(float peakLevel) {
     printf("\n");
 }
 
+/**
+ * @param position1 position of hardware frame
+ * @param nanoseconds1
+ * @param position2 position of client read/write
+ * @param nanoseconds2
+ * @param sampleRate
+ * @return latency in milliseconds
+ */
+static double calculateLatencyMillis(int64_t position1, int64_t nanoseconds1,
+                                     int64_t position2, int64_t nanoseconds2,
+                                     int64_t sampleRate) {
+    int64_t deltaFrames = position2 - position1;
+    int64_t deltaTime =
+            (NANOS_PER_SECOND * deltaFrames / sampleRate);
+    int64_t timeCurrentFramePlayed = nanoseconds1 + deltaTime;
+    int64_t latencyNanos = timeCurrentFramePlayed - nanoseconds2;
+    double latencyMillis = latencyNanos / 1000000.0;
+    return latencyMillis;
+}
+
 #endif // AAUDIO_EXAMPLE_UTILS_H

diff --git a/media/libaaudio/src/binding/AAudioServiceMessage.h b/media/libaaudio/src/binding/AAudioServiceMessage.h
index b4377fbed0..54e8001d78 100644
--- a/media/libaaudio/src/binding/AAudioServiceMessage.h
+++ b/media/libaaudio/src/binding/AAudioServiceMessage.h
@@ -28,7 +28,6 @@
 namespace aaudio {
 
 // Used to send information about the HAL to the client.
 struct AAudioMessageTimestamp {
     int64_t position;     // number of frames transferred so far
-    int64_t deviceOffset; // add to client position to get device position
     int64_t timestamp;    // time when that position was reached
 };
 
@@ -51,7 +50,8 @@ struct AAudioMessageEvent {
 typedef struct AAudioServiceMessage_s {
     enum class code : uint32_t {
         NOTHING,
-        TIMESTAMP,
+        TIMESTAMP_SERVICE,  // when frame is read or written by the service to the client
+        TIMESTAMP_HARDWARE, // when frame is at DAC or ADC
         EVENT,
     };

diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 4c7d0f79ea..259c9b9808 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -68,6 +68,7 @@ AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterfa
         , mServiceInterface(serviceInterface)
         , mWakeupDelayNanos(AAudioProperty_getWakeupDelayMicros() * AAUDIO_NANOS_PER_MICROSECOND)
         , mMinimumSleepNanos(AAudioProperty_getMinimumSleepMicros() * AAUDIO_NANOS_PER_MICROSECOND)
+        , mAtomicTimestamp()
         {
     ALOGD("AudioStreamInternal(): mWakeupDelayNanos = %d, mMinimumSleepNanos = %d",
          mWakeupDelayNanos, mMinimumSleepNanos);
@@ -351,12 +352,15 @@ aaudio_result_t AudioStreamInternal::stopClient(audio_port_handle_t clientHandle
 aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                            int64_t *framePosition,
                            int64_t *timeNanoseconds) {
-    // TODO Generate in server and pass to client. Return latest.
-    int64_t time = AudioClock::getNanoseconds();
-    *framePosition = mClockModel.convertTimeToPosition(time) + mFramesOffsetFromService;
-    // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
-    *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
-    return AAUDIO_OK;
+    // Generated in server and passed to client. Return latest.
+    if (mAtomicTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicTimestamp.read();
+        *framePosition = timestamp.getPosition();
+        *timeNanoseconds = timestamp.getNanoseconds();
+        return AAUDIO_OK;
+    } else {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
 }
 
 aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
@@ -385,7 +389,7 @@ void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command)
     oldTime = nanoTime;
 }
 
-aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
+aaudio_result_t AudioStreamInternal::onTimestampService(AAudioServiceMessage *message) {
 #if LOG_TIMESTAMPS
     logTimestamp(*message);
 #endif
@@ -393,6 +397,12 @@ aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage
     return AAUDIO_OK;
 }
 
+aaudio_result_t AudioStreamInternal::onTimestampHardware(AAudioServiceMessage *message) {
+    Timestamp timestamp(message->timestamp.position, message->timestamp.timestamp);
+    mAtomicTimestamp.write(timestamp);
+    return AAUDIO_OK;
+}
+
 aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
     aaudio_result_t result = AAUDIO_OK;
     switch (message->event.event) {
@@ -456,8 +466,12 @@ aaudio_result_t AudioStreamInternal::processCommands() {
             break; // no command this time, no problem
         }
         switch (message.what) {
-        case AAudioServiceMessage::code::TIMESTAMP:
-            result = onTimestampFromServer(&message);
+        case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
+            result = onTimestampService(&message);
+            break;
+
+        case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
+            result = onTimestampHardware(&message);
             break;
 
         case AAudioServiceMessage::code::EVENT:

diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 1b991de2ab..607e734771 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -122,7 +122,9 @@ protected:
 
     aaudio_result_t onEventFromServer(AAudioServiceMessage *message);
 
-    aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
+    aaudio_result_t onTimestampService(AAudioServiceMessage *message);
+
+    aaudio_result_t onTimestampHardware(AAudioServiceMessage *message);
 
     void logTimestamp(AAudioServiceMessage &message);
 
@@ -181,6 +183,11 @@ private:
 
     AudioEndpointParcelable  mEndPointParcelable; // description of the buffers filled by service
     EndpointDescriptor       mEndpointDescriptor; // buffer description with resolved addresses
+
+    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+
+    int64_t                  mServiceLatencyNanos = 0;
+
 };
 
 } /* namespace aaudio */

diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index acd319bf98..b0c6c9409f 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -258,4 +258,74 @@ static inline bool AAudio_tryUntilTrue(
     }
 }
 
+
+/**
+ * Simple double buffer for a structure that can be written occasionally and read occasionally.
+ * This allows a SINGLE writer with multiple readers.
+ *
+ * It is OK if the FIFO overflows and we lose old values.
+ * It is also OK if we read an old value.
+ * A reader may return a non-atomic result if the other thread is rapidly writing
+ * new values on another core.
+ */
+template <typename T>
+class SimpleDoubleBuffer {
+public:
+    SimpleDoubleBuffer()
+            : mValues()
+            , mCounter(0) {}
+
+    __attribute__((no_sanitize("integer")))
+    void write(T value) {
+        int index = mCounter.load() & 1;
+        mValues[index] = value;
+        mCounter++; // Increment AFTER updating storage, OK if it wraps.
+    }
+
+    T read() const {
+        T result;
+        int before;
+        int after;
+        int timeout = 3;
+        do {
+            // Check to see if a write occurred while we were reading.
+            before = mCounter.load();
+            int index = (before & 1) ^ 1;
+            result = mValues[index];
+            after = mCounter.load();
+        } while ((after != before) && --timeout > 0);
+        return result;
+    }
+
+    /**
+     * @return true if at least one value has been written
+     */
+    bool isValid() const {
+        return mCounter.load() > 0;
+    }
+
+private:
+    T                mValues[2];
+    std::atomic<int> mCounter;
+};
+
+class Timestamp {
+public:
+    Timestamp()
+            : mPosition(0)
+            , mNanoseconds(0) {}
+    Timestamp(int64_t position, int64_t nanoseconds)
+            : mPosition(position)
+            , mNanoseconds(nanoseconds) {}
+
+    int64_t getPosition() const { return mPosition; }
+
+    int64_t getNanoseconds() const { return mNanoseconds; }
+
+private:
+    // These cannot be const because we need to implement the copy assignment operator.
+    int64_t mPosition;
+    int64_t mNanoseconds;
+};
+
 #endif //UTILITY_AAUDIO_UTILITIES_H

diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index e4eef06354..44029191c3 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -34,6 +34,17 @@ include $(CLEAR_VARS)
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils) \
     frameworks/av/media/libaaudio/include \
+    frameworks/av/media/libaaudio/src \
+    frameworks/av/media/libaaudio/examples
+LOCAL_SRC_FILES:= test_timestamps.cpp
+LOCAL_SHARED_LIBRARIES := libaaudio
+LOCAL_MODULE := test_timestamps
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+    $(call include-path-for, audio-utils) \
+    frameworks/av/media/libaaudio/include \
     frameworks/av/media/libaaudio/src
 LOCAL_SRC_FILES:= test_linear_ramp.cpp
 LOCAL_SHARED_LIBRARIES := libaaudio

diff --git a/media/libaaudio/tests/test_timestamps.cpp b/media/libaaudio/tests/test_timestamps.cpp
new file mode 100644
index 0000000000..d9ca391887
--- /dev/null
+++ b/media/libaaudio/tests/test_timestamps.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play silence and print the timestamps reported by AAudio.
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <aaudio/AAudio.h>
+#include <aaudio/AAudioTesting.h>
+
+#include "utils/AAudioExampleUtils.h"
+
+#define DEFAULT_TIMEOUT_NANOS  ((int64_t)1000000000)
+
+int main(int argc, char **argv) {
+    (void) argc;
+    (void) argv;
+
+    aaudio_result_t result = AAUDIO_OK;
+
+    int32_t triesLeft = 3;
+    int32_t bufferCapacity;
+    int32_t framesPerBurst = 0;
+    float *buffer = nullptr;
+
+    int32_t actualChannelCount = 0;
+    int32_t actualSampleRate = 0;
+    int32_t originalBufferSize = 0;
+    int32_t requestedBufferSize = 0;
+    int32_t finalBufferSize = 0;
+    aaudio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_FLOAT;
+    aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
+    int32_t framesMax;
+    int64_t framesTotal;
+    int64_t printAt;
+    int samplesPerBurst;
+    int64_t previousFramePosition = -1;
+
+    AAudioStreamBuilder *aaudioBuilder = nullptr;
+    AAudioStream *aaudioStream = nullptr;
+
+    // Make printf print immediately so that debug info is not stuck
+    // in a buffer if we hang or crash.
+    setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+    printf("Test Timestamps V0.1.1\n");
+
+    AAudio_setMMapPolicy(AAUDIO_POLICY_AUTO);
+
+    // Use an AAudioStreamBuilder to contain requested parameters.
+    result = AAudio_createStreamBuilder(&aaudioBuilder);
+    if (result != AAUDIO_OK) {
+        printf("AAudio_createStreamBuilder returned %s",
+               AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    // Request stream properties.
+    AAudioStreamBuilder_setFormat(aaudioBuilder, AAUDIO_FORMAT_PCM_FLOAT);
+    //AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_NONE);
+    AAudioStreamBuilder_setPerformanceMode(aaudioBuilder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
+
+    // Create an AAudioStream using the Builder.
+    result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
+    if (result != AAUDIO_OK) {
+        printf("AAudioStreamBuilder_openStream returned %s",
+               AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    // Check to see what kind of stream we actually got.
+    actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
+    actualChannelCount = AAudioStream_getChannelCount(aaudioStream);
+    actualDataFormat = AAudioStream_getFormat(aaudioStream);
+
+    printf("-------- chans = %3d, rate = %6d format = %d\n",
+           actualChannelCount, actualSampleRate, actualDataFormat);
+    printf("    Is MMAP used? %s\n", AAudioStream_isMMapUsed(aaudioStream)
+           ? "yes" : "no");
+
+    // This is the number of frames that are read in one chunk by a DMA controller
+    // or a DSP or a mixer.
+    framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+    printf("    framesPerBurst = %3d\n", framesPerBurst);
+
+    originalBufferSize = AAudioStream_getBufferSizeInFrames(aaudioStream);
+    requestedBufferSize = 2 * framesPerBurst;
+    finalBufferSize = AAudioStream_setBufferSizeInFrames(aaudioStream, requestedBufferSize);
+
+    printf("    BufferSize: original = %4d, requested = %4d, final = %4d\n",
+           originalBufferSize, requestedBufferSize, finalBufferSize);
+
+    samplesPerBurst = framesPerBurst * actualChannelCount;
+    buffer = new float[samplesPerBurst];
+
+    result = AAudioStream_requestStart(aaudioStream);
+    if (result != AAUDIO_OK) {
+        printf("AAudioStream_requestStart returned %s",
+               AAudio_convertResultToText(result));
+        goto finish;
+    }
+
+    // Play silence very briefly.
+    framesMax = actualSampleRate * 4;
+    framesTotal = 0;
+    printAt = actualSampleRate;
+    while (result == AAUDIO_OK && framesTotal < framesMax) {
+        int32_t framesWritten = AAudioStream_write(aaudioStream,
+                                                   buffer, framesPerBurst,
+                                                   DEFAULT_TIMEOUT_NANOS);
+        if (framesWritten < 0) {
+            result = framesWritten;
+            printf("write() returned %s, frames = %d\n",
+                   AAudio_convertResultToText(result), (int)framesTotal);
+            printf("  frames = %d\n", (int)framesTotal);
+        } else if (framesWritten != framesPerBurst) {
+            printf("write() returned %d, frames = %d\n", framesWritten, (int)framesTotal);
+            result = AAUDIO_ERROR_TIMEOUT;
+        } else {
+            framesTotal += framesWritten;
+            if (framesTotal >= printAt) {
+                printf("frames = %d\n", (int)framesTotal);
+                printAt += actualSampleRate;
+            }
+        }
+
+        // Print timestamps.
+        int64_t framePosition = 0;
+        int64_t frameTime = 0;
+        aaudio_result_t timeResult;
+        timeResult = AAudioStream_getTimestamp(aaudioStream, CLOCK_MONOTONIC,
+                                               &framePosition, &frameTime);
+
+        if (timeResult == AAUDIO_OK) {
+            if (framePosition > (previousFramePosition + 5000)) {
+                int64_t realTime = getNanoseconds();
+                int64_t framesWritten = AAudioStream_getFramesWritten(aaudioStream);
+
+                double latencyMillis = calculateLatencyMillis(framePosition, frameTime,
+                                                              framesWritten, realTime,
+                                                              actualSampleRate);
+
+                printf("--- timestamp: result = %4d, position = %lld, at %lld nanos"
+                       ", latency = %7.2f msec\n",
+                       timeResult,
+                       (long long) framePosition,
+                       (long long) frameTime,
+                       latencyMillis);
+                previousFramePosition = framePosition;
+            }
+        }
+    }
+
+    result = AAudioStream_requestStop(aaudioStream);
+    if (result != AAUDIO_OK) {
+        printf("AAudioStream_requestStop returned %s\n",
+               AAudio_convertResultToText(result));
+    }
+    result = AAudioStream_close(aaudioStream);
+    if (result != AAUDIO_OK) {
+        printf("AAudioStream_close returned %s\n",
+               AAudio_convertResultToText(result));
+    }
+    aaudioStream = nullptr;
+
+
+finish:
+    if (aaudioStream != nullptr) {
+        AAudioStream_close(aaudioStream);
+    }
+    AAudioStreamBuilder_delete(aaudioBuilder);
+    delete[] buffer;
+    printf("result = %d = %s\n", result, AAudio_convertResultToText(result));
+}

diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 0f863fe160..6c345cd1f5 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -177,7 +177,10 @@ bool AAudioServiceEndpoint::matches(const AAudioStreamConfiguration& configurati
         configuration.getSamplesPerFrame() != mStreamInternal->getSamplesPerFrame()) {
         return false;
     }
-
     return true;
 }
+
+aaudio_result_t AAudioServiceEndpoint::getTimestamp(int64_t *positionFrames, int64_t *timeNanos) {
+    return mStreamInternal->getTimestamp(CLOCK_MONOTONIC, positionFrames, timeNanos);
+}

diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index e40a6708ab..603d497106 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -69,6 +69,8 @@ public:
         mReferenceCount = count;
     }
 
+    aaudio_result_t getTimestamp(int64_t *positionFrames, int64_t *timeNanos);
+
     bool matches(const AAudioStreamConfiguration& configuration);
 
     virtual AudioStreamInternal *getStreamInternal() = 0;

diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index 6a373308a4..6504cc1838 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -62,6 +62,9 @@ void *AAudioServiceEndpointCapture::callbackLoop() {
     // result might be a frame count
     while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
+
+        int64_t mmapFramesRead = getStreamInternal()->getFramesRead();
+
         // Read audio data from stream using a blocking read.
         result = getStreamInternal()->read(mDistributionBuffer, getFramesPerBurst(), timeoutNanos);
         if (result == AAUDIO_ERROR_DISCONNECTED) {
@@ -74,18 +77,32 @@ void *AAudioServiceEndpointCapture::callbackLoop() {
         }
 
         // Distribute data to each active stream.
-        { // use lock guard
+        { // brackets are for lock_guard
+
             std::lock_guard <std::mutex> lock(mLockStreams);
-            for (sp<AAudioServiceStreamShared> sharedStream : mRegisteredStreams) {
-                if (sharedStream->isRunning()) {
-                    FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+            for (sp<AAudioServiceStreamShared> clientStream : mRegisteredStreams) {
+                if (clientStream->isRunning()) {
+                    FifoBuffer *fifo = clientStream->getDataFifoBuffer();
+
+                    // Determine offset between framePosition in client's stream vs the underlying
+                    // MMAP stream.
+                    int64_t clientFramesWritten = fifo->getWriteCounter();
+                    // There are two indices that refer to the same frame.
+                    int64_t positionOffset = mmapFramesRead - clientFramesWritten;
+                    clientStream->setTimestampPositionOffset(positionOffset);
+
                     if (fifo->getFifoControllerBase()->getEmptyFramesAvailable() < getFramesPerBurst()) {
                         underflowCount++;
                     } else {
                         fifo->write(mDistributionBuffer, getFramesPerBurst());
                     }
-                    sharedStream->markTransferTime(AudioClock::getNanoseconds());
+
+                    // This timestamp represents the completion of data being written into the
+                    // client buffer. It is sent to the client and used in the timing model
+                    // to decide when data will be available to read.
+                    Timestamp timestamp(fifo->getWriteCounter(), AudioClock::getNanoseconds());
+                    clientStream->markTransferTime(timestamp);
                 }
             }
         }

diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index b83b918798..20cc5b85ba 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -73,17 +73,31 @@ void *AAudioServiceEndpointPlay::callbackLoop() {
     while (mCallbackEnabled.load() && getStreamInternal()->isActive() && (result >= 0)) {
         // Mix data from each active stream.
         mMixer.clear();
-        { // use lock guard
+        { // brackets are for lock_guard
             int index = 0;
+            int64_t mmapFramesWritten = getStreamInternal()->getFramesWritten();
+
             std::lock_guard <std::mutex> lock(mLockStreams);
-            for (sp<AAudioServiceStreamShared> sharedStream : mRegisteredStreams) {
-                if (sharedStream->isRunning()) {
-                    FifoBuffer *fifo = sharedStream->getDataFifoBuffer();
+            for (sp<AAudioServiceStreamShared> clientStream : mRegisteredStreams) {
+                if (clientStream->isRunning()) {
+                    FifoBuffer *fifo = clientStream->getDataFifoBuffer();
+                    // Determine offset between framePosition in client's stream vs the underlying
+                    // MMAP stream.
+                    int64_t clientFramesRead = fifo->getReadCounter();
+                    // These two indices refer to the same frame.
+                    int64_t positionOffset = mmapFramesWritten - clientFramesRead;
+                    clientStream->setTimestampPositionOffset(positionOffset);
+
                     float volume = 1.0; // to match legacy volume
                     bool underflowed = mMixer.mix(index, fifo, volume);
                     underflowCount += underflowed ? 1 : 0;
                     // TODO log underflows in each stream
-                    sharedStream->markTransferTime(AudioClock::getNanoseconds());
+
+                    // This timestamp represents the completion of data being read out of the
+                    // client buffer. It is sent to the client and used in the timing model
+                    // to decide when the client has room to write more data.
+                    Timestamp timestamp(fifo->getReadCounter(), AudioClock::getNanoseconds());
+                    clientStream->markTransferTime(timestamp);
                 }
                 index++;
             }

diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 5f7d1792cd..3fc5957fe1 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -37,7 +37,8 @@ using namespace aaudio;   // TODO just import names needed
 
 AAudioServiceStreamBase::AAudioServiceStreamBase()
         : mUpMessageQueue(nullptr)
-        , mAAudioThread() {
+        , mAAudioThread()
+        , mAtomicTimestamp() {
     mMmapClient.clientUid = -1;
     mMmapClient.clientPid = -1;
     mMmapClient.packageName = String16("");
@@ -211,15 +212,25 @@ aaudio_result_t AAudioServiceStreamBase::writeUpMessageQueue(AAudioServiceMessag
 
 aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp() {
     AAudioServiceMessage command;
+    // Send a timestamp for the clock model.
     aaudio_result_t result = getFreeRunningPosition(&command.timestamp.position,
                                                     &command.timestamp.timestamp);
     if (result == AAUDIO_OK) {
-        // ALOGD("sendCurrentTimestamp(): position = %lld, nanos = %lld",
-        //       (long long) command.timestamp.position,
-        //       (long long) command.timestamp.timestamp);
-        command.what = AAudioServiceMessage::code::TIMESTAMP;
+        command.what = AAudioServiceMessage::code::TIMESTAMP_SERVICE;
         result = writeUpMessageQueue(&command);
-    } else if (result == AAUDIO_ERROR_UNAVAILABLE) {
+
+        if (result == AAUDIO_OK) {
+            // Send a hardware timestamp for presentation time.
+            result = getHardwareTimestamp(&command.timestamp.position,
+                                          &command.timestamp.timestamp);
+            if (result == AAUDIO_OK) {
+                command.what = AAudioServiceMessage::code::TIMESTAMP_HARDWARE;
+                result = writeUpMessageQueue(&command);
+            }
+        }
+    }
+
+    if (result == AAUDIO_ERROR_UNAVAILABLE) {
         result = AAUDIO_OK; // just not available yet, try again later
     }
     return result;

diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index cebefec08e..e91ea8207e 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -20,6 +20,7 @@
 #include
 #include
+#include
 #include
 
 #include "fifo/FifoBuffer.h"
@@ -27,7 +28,7 @@
 #include "binding/AudioEndpointParcelable.h"
 #include "binding/AAudioServiceMessage.h"
 #include "utility/AAudioUtilities.h"
-#include
+#include "utility/AudioClock.h"
 
 #include "SharedRingBuffer.h"
 #include "AAudioThread.h"
@@ -170,6 +171,8 @@ protected:
      */
     virtual aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) = 0;
 
+    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames, int64_t *timeNanos) = 0;
+
     virtual aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) = 0;
 
     aaudio_stream_state_t   mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
@@ -191,6 +194,8 @@ protected:
     android::AudioClient    mMmapClient;
     audio_port_handle_t     mClientHandle = AUDIO_PORT_HANDLE_NONE;
 
+    SimpleDoubleBuffer<Timestamp>  mAtomicTimestamp;
+
 private:
     aaudio_handle_t         mHandle = -1;
 };

diff --git a/services/oboeservice/AAudioServiceStreamMMAP.cpp b/services/oboeservice/AAudioServiceStreamMMAP.cpp
index ff02c0f8ed..970d734e60 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.cpp
+++ b/services/oboeservice/AAudioServiceStreamMMAP.cpp
@@ -37,6 +37,11 @@ using namespace aaudio;
 
 #define AAUDIO_BUFFER_CAPACITY_MIN    4 * 512
 #define AAUDIO_SAMPLE_RATE_DEFAULT    48000
 
+// This is an estimate of the time difference between the HW and the MMAP time.
+// TODO Get presentation timestamps from the HAL instead of using these estimates.
+#define OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS  (3 * AAUDIO_NANOS_PER_MILLISECOND)
+#define INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS   (-1 * AAUDIO_NANOS_PER_MILLISECOND)
+
 /**
  * Service Stream that uses an MMAP buffer.
  */
@@ -113,10 +118,14 @@ aaudio_result_t AAudioServiceStreamMMAP::open(const aaudio::AAudioStreamRequest
         config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                               ? AUDIO_CHANNEL_OUT_STEREO
                               : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
+        mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
+
     } else if (direction == AAUDIO_DIRECTION_INPUT) {
         config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
                               ? AUDIO_CHANNEL_IN_STEREO
                               : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
+        mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
+
     } else {
         ALOGE("openMmapStream - invalid direction = %d", direction);
         return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
@@ -289,6 +298,7 @@ aaudio_result_t AAudioServiceStreamMMAP::stopClient(audio_port_handle_t clientHa
     return AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
 }
 
+// Get free-running DSP or DMA hardware position from the HAL.
 aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positionFrames,
                                                                 int64_t *timeNanos) {
     struct audio_mmap_position position;
@@ -305,12 +315,29 @@ aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition(int64_t *positio
         disconnect();
     } else {
         mFramesRead.update32(position.position_frames);
-        *positionFrames = mFramesRead.get();
-        *timeNanos = position.time_nanoseconds;
+
+        Timestamp timestamp(mFramesRead.get(), position.time_nanoseconds);
+        mAtomicTimestamp.write(timestamp);
+        *positionFrames = timestamp.getPosition();
+        *timeNanos = timestamp.getNanoseconds();
     }
     return result;
 }
 
+// Get timestamp that was written by getFreeRunningPosition()
+aaudio_result_t AAudioServiceStreamMMAP::getHardwareTimestamp(int64_t *positionFrames,
+                                                              int64_t *timeNanos) {
+    // TODO Get presentation timestamp from the HAL
+    if (mAtomicTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicTimestamp.read();
+        *positionFrames = timestamp.getPosition();
+        *timeNanos = timestamp.getNanoseconds() + mHardwareTimeOffsetNanos;
+        return AAUDIO_OK;
+    } else {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+}
+
 void AAudioServiceStreamMMAP::onTearDown() {
     ALOGD("AAudioServiceStreamMMAP::onTearDown() called");
     disconnect();

diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index 533e5a8ad0..e6f8fade3b 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -100,6 +100,8 @@ protected:
     aaudio_result_t getDownDataDescription(AudioEndpointParcelable &parcelable) override;
 
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
+    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames,
+                                                 int64_t *timeNanos) override;
 
 private:
     // This proxy class was needed to prevent a crash in AudioFlinger
@@ -132,6 +134,7 @@ private:
     MonotonicCounter            mFramesRead;
     int32_t                     mPreviousFrameCounter = 0;   // from HAL
     int                         mAudioDataFileDescriptor = -1;
+    int64_t                     mHardwareTimeOffsetNanos = 0; // TODO get from HAL
 
     // Interface to the AudioFlinger MMAP support.
     android::sp<MmapStreamInterface> mMmapStream;

diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index 5654113208..07c4faf9fb 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -41,6 +41,7 @@ using namespace aaudio;
 
 AAudioServiceStreamShared::AAudioServiceStreamShared(AAudioService &audioService)
     : mAudioService(audioService)
+    , mTimestampPositionOffset(0)
     {
 }
 
@@ -307,15 +308,30 @@ aaudio_result_t AAudioServiceStreamShared::getDownDataDescription(AudioEndpointP
     return AAUDIO_OK;
 }
 
-void AAudioServiceStreamShared::markTransferTime(int64_t nanoseconds) {
-    mMarkedPosition = mAudioDataQueue->getFifoBuffer()->getReadCounter();
-    mMarkedTime = nanoseconds;
+void AAudioServiceStreamShared::markTransferTime(Timestamp &timestamp) {
+    mAtomicTimestamp.write(timestamp);
 }
 
+// Get timestamp that was written by the real-time service thread, e.g. the mixer.
 aaudio_result_t AAudioServiceStreamShared::getFreeRunningPosition(int64_t *positionFrames,
                                                                   int64_t *timeNanos) {
-    // TODO get these two numbers as an atomic pair
-    *positionFrames = mMarkedPosition;
-    *timeNanos = mMarkedTime;
-    return AAUDIO_OK;
+    if (mAtomicTimestamp.isValid()) {
+        Timestamp timestamp = mAtomicTimestamp.read();
+        *positionFrames = timestamp.getPosition();
+        *timeNanos = timestamp.getNanoseconds();
+        return AAUDIO_OK;
+    } else {
+        return AAUDIO_ERROR_UNAVAILABLE;
+    }
+}
+
+// Get timestamp from lower level service.
+aaudio_result_t AAudioServiceStreamShared::getHardwareTimestamp(int64_t *positionFrames,
+                                                                int64_t *timeNanos) {
+
+    aaudio_result_t result = mServiceEndpoint->getTimestamp(positionFrames, timeNanos);
+    if (result == AAUDIO_OK) {
+        *positionFrames -= mTimestampPositionOffset.load(); // Offset from shared MMAP stream
+    }
+    return result;
 }

diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 6b67337404..8caccda29e 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -85,7 +85,11 @@ public:
     /* Keep a record of when a buffer transfer completed.
      * This allows for a more accurate timing model.
      */
-    void markTransferTime(int64_t nanoseconds);
+    void markTransferTime(Timestamp &timestamp);
+
+    void setTimestampPositionOffset(int64_t deltaFrames) {
+        mTimestampPositionOffset.store(deltaFrames);
+    }
 
 protected:
 
@@ -93,6 +97,9 @@ protected:
 
     aaudio_result_t getFreeRunningPosition(int64_t *positionFrames, int64_t *timeNanos) override;
 
+    virtual aaudio_result_t getHardwareTimestamp(int64_t *positionFrames,
+                                                 int64_t *timeNanos) override;
+
     /**
      * @param requestedCapacityFrames
     *  @param framesPerBurst
@@ -106,8 +113,7 @@ private:
     AAudioServiceEndpoint  *mServiceEndpoint = nullptr;
     SharedRingBuffer       *mAudioDataQueue = nullptr;
 
-    int64_t                 mMarkedPosition = 0;
-    int64_t                 mMarkedTime = 0;
+    std::atomic<int64_t>    mTimestampPositionOffset;
 };
 
 } /* namespace aaudio */
-- 
2.11.0
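
Reviewer note: the sketch below is not part of the patch. It shows how a client might consume the timestamps this change makes available, polling AAudioStream_getTimestamp() on an output stream and deriving a latency estimate with the same arithmetic as calculateLatencyMillis() in AAudioExampleUtils.h. It assumes an already-open, started output stream (the `stream` parameter is hypothetical) and treats AAUDIO_ERROR_UNAVAILABLE as "no timestamp received yet", which is the behavior introduced by this patch.

```cpp
// Sketch only: assumes "stream" is an open, started AAUDIO_DIRECTION_OUTPUT stream.
#include <cstdint>
#include <ctime>
#include <aaudio/AAudio.h>

// Returns an estimated output latency in milliseconds,
// or a negative AAudio error code if no timestamp is available yet.
static double estimateOutputLatencyMillis(AAudioStream *stream) {
    int64_t hardwarePosition = 0;   // frames that have reached the DAC
    int64_t hardwareTimeNanos = 0;  // CLOCK_MONOTONIC time of that position
    aaudio_result_t result = AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC,
                                                       &hardwarePosition, &hardwareTimeNanos);
    if (result != AAUDIO_OK) {
        // Before the service sends its first TIMESTAMP_HARDWARE message this
        // returns AAUDIO_ERROR_UNAVAILABLE; callers should simply retry later.
        return (double) result;
    }

    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    int64_t nowNanos = (now.tv_sec * 1000000000LL) + now.tv_nsec;

    // Same arithmetic as calculateLatencyMillis(): project when the most recently
    // written frame will reach the position reported by the hardware timestamp.
    int64_t framesWritten = AAudioStream_getFramesWritten(stream);
    int64_t sampleRate = AAudioStream_getSampleRate(stream);
    int64_t deltaFrames = framesWritten - hardwarePosition;
    int64_t whenWrittenFramePlays = hardwareTimeNanos + (deltaFrames * 1000000000LL) / sampleRate;
    return (whenWrittenFramePlays - nowNanos) / 1000000.0;
}
```

For an input stream the subtraction flips (compare frames read by the client against the frames the ADC has captured), which is why input_monitor.cpp passes the arguments to calculateLatencyMillis() in the opposite order from test_timestamps.cpp.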