mRecordingHeapCount(kDefaultRecordingHeapCount)
{
ATRACE_CALL();
+ ALOGV("%s: Created client for camera %d", __FUNCTION__, cameraId);
mDevice = new Camera2Device(cameraId);
status_t Camera2Client::initialize(camera_module_t *module)
{
ATRACE_CALL();
- ALOGV("%s: E", __FUNCTION__);
+ ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
status_t res;
+ mFrameProcessor = new FrameProcessor(this);
+ String8 frameThreadName = String8::format("Camera2Client[%d]::FrameProcessor",
+ mCameraId);
+ mFrameProcessor->run(frameThreadName.string());
+
res = mDevice->initialize(module);
if (res != OK) {
ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
}
res = mDevice->setNotifyCallback(this);
- res = mDevice->setFrameListener(this);
res = buildDeviceInfo();
res = buildDefaultParameters();
Camera2Client::~Camera2Client() {
ATRACE_CALL();
- ALOGV("%s: Camera %d: Shutting down", __FUNCTION__, mCameraId);
+ ALOGV("%s: Camera %d: Shutting down client.", __FUNCTION__, mCameraId);
mDestructionStarted = true;
// Rewrite mClientPid to allow shutdown by CameraService
mClientPid = getCallingPid();
disconnect();
+
+ mFrameProcessor->requestExit();
+ ALOGV("%s: Camera %d: Shutdown complete", __FUNCTION__, mCameraId);
}
status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
write(fd, result.string(), result.size());
}
+ mFrameProcessor->dump(fd, args);
+
result = " Device dump:\n";
write(fd, result.string(), result.size());
__FUNCTION__, newState, triggerId);
}
-void Camera2Client::onNewFrameAvailable() {
+// Frame-processing thread for Camera2Client. Holds only a weak reference
+// to the owning client so the thread cannot keep the client alive;
+// Thread(false) means the thread does not call Java (canCallJava=false).
+Camera2Client::FrameProcessor::FrameProcessor(wp<Camera2Client> client):
+ Thread(false), mClient(client) {
+}
+
+// Destructor only logs, for tracing thread/object lifetime; the thread is
+// stopped elsewhere (requestExit is issued by the owning client).
+Camera2Client::FrameProcessor::~FrameProcessor() {
+ ALOGV("%s: Exit", __FUNCTION__);
+}
+
+// Write a human-readable dump of the most recently received frame's
+// metadata to fd. Invoked from Camera2Client::dump; 'args' is accepted for
+// signature parity but currently unused.
+void Camera2Client::FrameProcessor::dump(int fd, const Vector<String16>& args) {
+ String8 result(" Latest received frame:\n");
+ write(fd, result.string(), result.size());
+ mLastFrame.dump(fd, 2, 6);
+}
+
+// One iteration of the frame-processing thread: wait (bounded by
+// kWaitDuration) for a new frame from the device, then process it.
+// Returns false to terminate the thread once the owning client has been
+// destroyed (the weak reference fails to promote); returns true otherwise,
+// including after a timeout, so the loop keeps polling.
+bool Camera2Client::FrameProcessor::threadLoop() {
+ status_t res;
+
+ sp<Camera2Device> device;
+ {
+ // Promote the weak client ref just long enough to grab the device;
+ // avoid holding a strong ref across the blocking wait below.
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return false;
+ device = client->mDevice;
+ }
+
+ res = device->waitForNextFrame(kWaitDuration);
+ if (res == OK) {
+ // Re-promote: the client may have been destroyed during the wait.
+ sp<Camera2Client> client = mClient.promote();
+ if (client == 0) return false;
+ processNewFrames(client);
+ } else if (res != TIMED_OUT) {
+ ALOGE("Camera2Client::FrameProcessor: Error waiting for new "
+ "frames: %s (%d)", strerror(-res), res);
+ }
+
+ return true;
+}
+
+void Camera2Client::FrameProcessor::processNewFrames(sp<Camera2Client> &client) {
status_t res;
CameraMetadata frame;
- while ( (res = mDevice->getNextFrame(&frame)) == OK) {
+ while ( (res = client->mDevice->getNextFrame(&frame)) == OK) {
camera_metadata_entry_t entry;
entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
if (entry.count == 0) {
ALOGE("%s: Camera %d: Error reading frame number: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
+ __FUNCTION__, client->mCameraId, strerror(-res), res);
break;
}
- res = processFrameFaceDetect(frame);
+ res = processFaceDetect(frame, client);
if (res != OK) break;
+
+ mLastFrame.acquire(frame);
}
if (res != NOT_ENOUGH_DATA) {
ALOGE("%s: Camera %d: Error getting next frame: %s (%d)",
- __FUNCTION__, mCameraId, strerror(-res), res);
+ __FUNCTION__, client->mCameraId, strerror(-res), res);
return;
}
return;
}
-status_t Camera2Client::processFrameFaceDetect(const CameraMetadata &frame) {
+status_t Camera2Client::FrameProcessor::processFaceDetect(
+ const CameraMetadata &frame, sp<Camera2Client> &client) {
status_t res;
camera_metadata_ro_entry_t entry;
bool enableFaceDetect;
{
- LockedParameters::Key k(mParameters);
+ LockedParameters::Key k(client->mParameters);
enableFaceDetect = k.mParameters.enableFaceDetect;
}
entry = frame.find(ANDROID_STATS_FACE_DETECT_MODE);
entry = frame.find(ANDROID_STATS_FACE_RECTANGLES);
if (entry.count == 0) {
ALOGE("%s: Camera %d: Unable to read face rectangles",
- __FUNCTION__, mCameraId);
+ __FUNCTION__, client->mCameraId);
return res;
}
camera_frame_metadata metadata;
metadata.number_of_faces = entry.count / 4;
if (metadata.number_of_faces >
- mDeviceInfo->maxFaces) {
+ client->mDeviceInfo->maxFaces) {
ALOGE("%s: Camera %d: More faces than expected! (Got %d, max %d)",
- __FUNCTION__, mCameraId,
- metadata.number_of_faces, mDeviceInfo->maxFaces);
+ __FUNCTION__, client->mCameraId,
+ metadata.number_of_faces, client->mDeviceInfo->maxFaces);
return res;
}
const int32_t *faceRects = entry.data.i32;
entry = frame.find(ANDROID_STATS_FACE_SCORES);
if (entry.count == 0) {
ALOGE("%s: Camera %d: Unable to read face scores",
- __FUNCTION__, mCameraId);
+ __FUNCTION__, client->mCameraId);
return res;
}
const uint8_t *faceScores = entry.data.u8;
entry = frame.find(ANDROID_STATS_FACE_LANDMARKS);
if (entry.count == 0) {
ALOGE("%s: Camera %d: Unable to read face landmarks",
- __FUNCTION__, mCameraId);
+ __FUNCTION__, client->mCameraId);
return res;
}
faceLandmarks = entry.data.i32;
if (entry.count == 0) {
ALOGE("%s: Camera %d: Unable to read face IDs",
- __FUNCTION__, mCameraId);
+ __FUNCTION__, client->mCameraId);
return res;
}
faceIds = entry.data.i32;
for (int i = 0; i < metadata.number_of_faces; i++) {
camera_face_t face;
- face.rect[0] = arrayXToNormalized(faceRects[i*4 + 0]);
- face.rect[1] = arrayYToNormalized(faceRects[i*4 + 1]);
- face.rect[2] = arrayXToNormalized(faceRects[i*4 + 2]);
- face.rect[3] = arrayYToNormalized(faceRects[i*4 + 3]);
+ face.rect[0] = client->arrayXToNormalized(faceRects[i*4 + 0]);
+ face.rect[1] = client->arrayYToNormalized(faceRects[i*4 + 1]);
+ face.rect[2] = client->arrayXToNormalized(faceRects[i*4 + 2]);
+ face.rect[3] = client->arrayYToNormalized(faceRects[i*4 + 3]);
face.score = faceScores[i];
if (faceDetectMode == ANDROID_STATS_FACE_DETECTION_FULL) {
face.id = faceIds[i];
- face.left_eye[0] = arrayXToNormalized(faceLandmarks[i*6 + 0]);
- face.left_eye[1] = arrayYToNormalized(faceLandmarks[i*6 + 1]);
- face.right_eye[0] = arrayXToNormalized(faceLandmarks[i*6 + 2]);
- face.right_eye[1] = arrayYToNormalized(faceLandmarks[i*6 + 3]);
- face.mouth[0] = arrayXToNormalized(faceLandmarks[i*6 + 4]);
- face.mouth[1] = arrayYToNormalized(faceLandmarks[i*6 + 5]);
+ face.left_eye[0] =
+ client->arrayXToNormalized(faceLandmarks[i*6 + 0]);
+ face.left_eye[1] =
+ client->arrayYToNormalized(faceLandmarks[i*6 + 1]);
+ face.right_eye[0] =
+ client->arrayXToNormalized(faceLandmarks[i*6 + 2]);
+ face.right_eye[1] =
+ client->arrayYToNormalized(faceLandmarks[i*6 + 3]);
+ face.mouth[0] =
+ client->arrayXToNormalized(faceLandmarks[i*6 + 4]);
+ face.mouth[1] =
+ client->arrayYToNormalized(faceLandmarks[i*6 + 5]);
} else {
face.id = 0;
face.left_eye[0] = face.left_eye[1] = -2000;
metadata.faces = faces.editArray();
{
- Mutex::Autolock iccl(mICameraClientLock);
- if (mCameraClient != NULL) {
- mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_METADATA,
+ Mutex::Autolock iccl(client->mICameraClientLock);
+ if (client->mCameraClient != NULL) {
+ client->mCameraClient->dataCallback(CAMERA_MSG_PREVIEW_METADATA,
NULL, &metadata);
}
}
mId(id),
mDevice(NULL)
{
- ALOGV("%s: E", __FUNCTION__);
+ ALOGV("%s: Created device for camera %d", __FUNCTION__, id);
}
Camera2Device::~Camera2Device()
{
- ALOGV("%s: E", __FUNCTION__);
+ ALOGV("%s: Shutting down device for camera %d", __FUNCTION__, mId);
if (mDevice) {
status_t res;
res = mDevice->common.close(&mDevice->common);
}
mDevice = NULL;
}
+ ALOGV("%s: Shutdown complete", __FUNCTION__);
}
status_t Camera2Device::initialize(camera_module_t *module)
{
- ALOGV("%s: E", __FUNCTION__);
+ ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
status_t res;
char name[10];
}
}
-status_t Camera2Device::setFrameListener(FrameListener *listener) {
- return mFrameQueue.setListener(listener);
+// Block until a new frame is available on the frame queue or 'timeout'
+// (nanoseconds) elapses. Passes through mFrameQueue.waitForBuffer's status;
+// callers treat OK as frame-available and TIMED_OUT as a benign timeout.
+// Replaces the removed listener-callback mechanism (setFrameListener).
+status_t Camera2Device::waitForNextFrame(nsecs_t timeout) {
+ return mFrameQueue.waitForBuffer(timeout);
}
status_t Camera2Device::getNextFrame(CameraMetadata *frame) {
}
/**
- * Camera2Device::FrameListener
- */
-
-Camera2Device::FrameListener::~FrameListener() {
-}
-
-/**
* Camera2Device::MetadataQueue
*/
mFrameCount(0),
mCount(0),
mStreamSlotCount(0),
- mSignalConsumer(true),
- mListener(NULL)
+ mSignalConsumer(true)
{
camera2_request_queue_src_ops::dequeue_request = consumer_dequeue;
camera2_request_queue_src_ops::request_count = consumer_buffer_count;
return OK;
}
-status_t Camera2Device::MetadataQueue::setListener(FrameListener *listener) {
- Mutex::Autolock l(mMutex);
- mListener = listener;
- return OK;
-}
-
status_t Camera2Device::MetadataQueue::setStreamSlot(camera_metadata_t *buf)
{
ALOGV("%s: E", __FUNCTION__);
res = mDevice->ops->notify_request_queue_not_empty(mDevice);
mMutex.lock();
}
- if (mListener != NULL) {
- FrameListener *listener = mListener;
- mMutex.unlock();
- ALOGVV("%s: Signaling listener", __FUNCTION__);
- listener->onNewFrameAvailable();
- mMutex.lock();
- }
return res;
}