/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>

//#define LOG_NDEBUG 0
#define LOG_TAG "CameraSource"
#include <utils/Log.h>

#include <OMX_Component.h>
#include <binder/IPCThreadState.h>
#include <binder/MemoryBase.h>
#include <binder/MemoryHeapBase.h>
#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/CameraSource.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <camera/Camera.h>
#include <camera/CameraParameters.h>
#include <gui/Surface.h>
#include <utils/String8.h>
#include <cutils/properties.h>

#if LOG_NDEBUG
#define UNUSED_UNLESS_VERBOSE(x) (void)(x)
#else
#define UNUSED_UNLESS_VERBOSE(x)
#endif

namespace android {

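// Time to wait for the next incoming frame (or for outstanding frames to be
// returned by the encoder) before logging a warning: 3 seconds, in nanoseconds.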
static const int64_t CAMERA_SOURCE_TIMEOUT_NS = 3000000000LL;

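// Listener registered with the camera while recording. It holds only a weak
// reference to the CameraSource, so it cannot keep the source alive, and it
// forwards each callback with timestamps converted from nanoseconds to
// microseconds.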
struct CameraSourceListener : public CameraListener {
    explicit CameraSourceListener(const sp<CameraSource> &source);

    virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2);
    virtual void postData(int32_t msgType, const sp<IMemory> &dataPtr,
                          camera_frame_metadata_t *metadata);

    virtual void postDataTimestamp(
            nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);

    virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle);

    virtual void postRecordingFrameHandleTimestampBatch(
            const std::vector<nsecs_t>& timestamps,
            const std::vector<native_handle_t*>& handles);

protected:
    virtual ~CameraSourceListener();

private:
    wp<CameraSource> mSource;

    CameraSourceListener(const CameraSourceListener &);
    CameraSourceListener &operator=(const CameraSourceListener &);
};

CameraSourceListener::CameraSourceListener(const sp<CameraSource> &source)
    : mSource(source) {
}

CameraSourceListener::~CameraSourceListener() {
}

void CameraSourceListener::notify(int32_t msgType, int32_t ext1, int32_t ext2) {
    UNUSED_UNLESS_VERBOSE(msgType);
    UNUSED_UNLESS_VERBOSE(ext1);
    UNUSED_UNLESS_VERBOSE(ext2);
    ALOGV("notify(%d, %d, %d)", msgType, ext1, ext2);
}

void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr,
                                    camera_frame_metadata_t * /* metadata */) {
    ALOGV("postData(%d, ptr:%p, size:%zu)",
         msgType, dataPtr->pointer(), dataPtr->size());

    sp<CameraSource> source = mSource.promote();
    if (source.get() != NULL) {
        source->dataCallback(msgType, dataPtr);
    }
}

void CameraSourceListener::postDataTimestamp(
        nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) {

    sp<CameraSource> source = mSource.promote();
    if (source.get() != NULL) {
        source->dataCallbackTimestamp(timestamp/1000, msgType, dataPtr);
    }
}

void CameraSourceListener::postRecordingFrameHandleTimestamp(nsecs_t timestamp,
        native_handle_t* handle) {
    sp<CameraSource> source = mSource.promote();
    if (source.get() != nullptr) {
        source->recordingFrameHandleCallbackTimestamp(timestamp/1000, handle);
    }
}

void CameraSourceListener::postRecordingFrameHandleTimestampBatch(
        const std::vector<nsecs_t>& timestamps,
        const std::vector<native_handle_t*>& handles) {
    sp<CameraSource> source = mSource.promote();
    if (source.get() != nullptr) {
        int n = timestamps.size();
        std::vector<nsecs_t> modifiedTimestamps(n);
        for (int i = 0; i < n; i++) {
            modifiedTimestamps[i] = timestamps[i] / 1000;
        }
        source->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
    }
}

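// Translate a CameraParameters pixel-format string into the matching OMX
// color constant. Returns -1 for a null format; an unrecognized format is a
// fatal error (CHECK).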
static int32_t getColorFormat(const char* colorFormat) {
    if (!colorFormat) {
        ALOGE("Invalid color format");
        return -1;
    }

    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420P)) {
        return OMX_COLOR_FormatYUV420Planar;
    }

    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422SP)) {
        return OMX_COLOR_FormatYUV422SemiPlanar;
    }

    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420SP)) {
        return OMX_COLOR_FormatYUV420SemiPlanar;
    }

    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422I)) {
        return OMX_COLOR_FormatYCbYCr;
    }

    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_RGB565)) {
        return OMX_COLOR_Format16bitRGB565;
    }

    if (!strcmp(colorFormat, "OMX_TI_COLOR_FormatYUV420PackedSemiPlanar")) {
        return OMX_TI_COLOR_FormatYUV420PackedSemiPlanar;
    }

    if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE)) {
        return OMX_COLOR_FormatAndroidOpaque;
    }

    ALOGE("Unknown color format (%s), please add it to "
        "CameraSource::getColorFormat", colorFormat);

    CHECK(!"Unknown color format");
    return -1;
}

CameraSource *CameraSource::Create(const String16 &clientName) {
    Size size;
    size.width = -1;
    size.height = -1;

    sp<hardware::ICamera> camera;
    return new CameraSource(camera, NULL, 0, clientName, Camera::USE_CALLING_UID,
            Camera::USE_CALLING_PID, size, -1, NULL, false);
}

// static
CameraSource *CameraSource::CreateFromCamera(
    const sp<hardware::ICamera>& camera,
    const sp<ICameraRecordingProxy>& proxy,
    int32_t cameraId,
    const String16& clientName,
    uid_t clientUid,
    pid_t clientPid,
    Size videoSize,
    int32_t frameRate,
    const sp<IGraphicBufferProducer>& surface,
    bool storeMetaDataInVideoBuffers) {

    CameraSource *source = new CameraSource(camera, proxy, cameraId,
            clientName, clientUid, clientPid, videoSize, frameRate, surface,
            storeMetaDataInVideoBuffers);
    return source;
}
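
// A minimal usage sketch (illustrative only; the camera/proxy plumbing and
// all error handling are assumed to exist in the caller):
//
//     sp<CameraSource> source = CameraSource::CreateFromCamera(
//             camera, proxy, cameraId, clientName, clientUid, clientPid,
//             videoSize, 30 /* frameRate */, surface,
//             true /* storeMetaDataInVideoBuffers */);
//     if (source->initCheck() == OK && source->start(NULL) == OK) {
//         MediaBufferBase *buffer = NULL;
//         while (source->read(&buffer, NULL) == OK && buffer != NULL) {
//             // Hand the frame to an encoder; releasing the buffer
//             // eventually drives signalBufferReturned() below.
//             buffer->release();
//         }
//     }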

CameraSource::CameraSource(
    const sp<hardware::ICamera>& camera,
    const sp<ICameraRecordingProxy>& proxy,
    int32_t cameraId,
    const String16& clientName,
    uid_t clientUid,
    pid_t clientPid,
    Size videoSize,
    int32_t frameRate,
    const sp<IGraphicBufferProducer>& surface,
    bool storeMetaDataInVideoBuffers)
    : mCameraFlags(0),
      mNumInputBuffers(0),
      mVideoFrameRate(-1),
      mCamera(0),
      mSurface(surface),
      mNumFramesReceived(0),
      mLastFrameTimestampUs(0),
      mStarted(false),
      mEos(false),
      mNumFramesEncoded(0),
      mTimeBetweenFrameCaptureUs(0),
      mFirstFrameTimeUs(0),
      mStopSystemTimeUs(-1),
      mNumFramesDropped(0),
      mNumGlitches(0),
      mGlitchDurationThresholdUs(200000),
      mCollectStats(false) {
    mVideoSize.width = -1;
    mVideoSize.height = -1;

    mInitCheck = init(camera, proxy, cameraId,
                    clientName, clientUid, clientPid,
                    videoSize, frameRate,
                    storeMetaDataInVideoBuffers);
    if (mInitCheck != OK) releaseCamera();
}

status_t CameraSource::initCheck() const {
    return mInitCheck;
}

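// Establish access to the camera: if no ICamera was supplied, connect to
// camera |cameraId| ourselves (a "cold" camera); otherwise wrap the
// application's remote camera and keep its recording proxy (a "hot" camera,
// flagged with FLAGS_HOT_CAMERA). The camera is locked on success.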
status_t CameraSource::isCameraAvailable(
    const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
    int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid) {

    if (camera == 0) {
        mCamera = Camera::connect(cameraId, clientName, clientUid, clientPid);
        if (mCamera == 0) return -EBUSY;
        mCameraFlags &= ~FLAGS_HOT_CAMERA;
    } else {
        // We get the proxy from Camera, not ICamera. We need to get the proxy
        // to the remote Camera owned by the application. Here mCamera is a
        // local Camera object created by us. We cannot use the proxy from
        // mCamera here.
        mCamera = Camera::create(camera);
        if (mCamera == 0) return -EBUSY;
        mCameraRecordingProxy = proxy;
        mCameraFlags |= FLAGS_HOT_CAMERA;
        mDeathNotifier = new DeathNotifier();
        // isBinderAlive needs linkToDeath to work.
        IInterface::asBinder(mCameraRecordingProxy)->linkToDeath(mDeathNotifier);
    }

    mCamera->lock();

    return OK;
}


/*
 * Check whether the requested video width and height are among
 * the supported sizes.
 * @param width the video frame width in pixels
 * @param height the video frame height in pixels
 * @param supportedSizes the vector of sizes that we check against
 * @return true if the dimension (width and height) is supported.
 */
static bool isVideoSizeSupported(
    int32_t width, int32_t height,
    const Vector<Size>& supportedSizes) {

    ALOGV("isVideoSizeSupported");
    for (size_t i = 0; i < supportedSizes.size(); ++i) {
        if (width == supportedSizes[i].width &&
            height == supportedSizes[i].height) {
            return true;
        }
    }
    return false;
}

/*
 * If the preview and video output is separate, we only set the
 * video size, and applications should set the preview size
 * to some proper value, and the recording framework will not
 * change the preview size; otherwise, if the video and preview
 * output is the same, we need to set the preview to be the same
 * as the requested video size.
 *
 */
/*
 * Query the camera to retrieve the supported video frame sizes
 * and also to see whether CameraParameters::setVideoSize()
 * is supported or not.
 * @param params CameraParameters to retrieve the information
 * @param isSetVideoSizeSupported returns whether method
 *      CameraParameters::setVideoSize() is supported or not.
 * @param sizes returns the vector of Size objects for the
 *      supported video frame sizes advertised by the camera.
 */
static void getSupportedVideoSizes(
    const CameraParameters& params,
    bool *isSetVideoSizeSupported,
    Vector<Size>& sizes) {

    *isSetVideoSizeSupported = true;
    params.getSupportedVideoSizes(sizes);
    if (sizes.size() == 0) {
        ALOGD("Camera does not support setVideoSize()");
        params.getSupportedPreviewSizes(sizes);
        *isSetVideoSizeSupported = false;
    }
}

/*
 * Check whether the camera has the supported color format
 * @param params CameraParameters to retrieve the information
 * @return OK if no error.
 */
status_t CameraSource::isCameraColorFormatSupported(
        const CameraParameters& params) {
    mColorFormat = getColorFormat(params.get(
            CameraParameters::KEY_VIDEO_FRAME_FORMAT));
    if (mColorFormat == -1) {
        return BAD_VALUE;
    }
    return OK;
}

/*
 * Configure the camera to use the requested video size
 * (width and height) and/or frame rate. If both width and
 * height are -1, configuration on the video size is skipped.
 * If frameRate is -1, configuration on the frame rate
 * is skipped. Skipping the configuration allows one to
 * use the current camera setting without the need to
 * actually know the specific values (see Create() method).
 *
 * @param params the CameraParameters to be configured
 * @param width the target video frame width in pixels
 * @param height the target video frame height in pixels
 * @param frameRate the target frame rate in frames per second.
 * @return OK if no error.
 */
status_t CameraSource::configureCamera(
        CameraParameters* params,
        int32_t width, int32_t height,
        int32_t frameRate) {
    ALOGV("configureCamera");
    Vector<Size> sizes;
    bool isSetVideoSizeSupportedByCamera = true;
    getSupportedVideoSizes(*params, &isSetVideoSizeSupportedByCamera, sizes);
    bool isCameraParamChanged = false;
    if (width != -1 && height != -1) {
        if (!isVideoSizeSupported(width, height, sizes)) {
            ALOGE("Video dimension (%dx%d) is unsupported", width, height);
            return BAD_VALUE;
        }
        if (isSetVideoSizeSupportedByCamera) {
            params->setVideoSize(width, height);
        } else {
            params->setPreviewSize(width, height);
        }
        isCameraParamChanged = true;
    } else if ((width == -1 && height != -1) ||
               (width != -1 && height == -1)) {
        // If one and only one of the width and height is -1
        // we reject such a request.
        ALOGE("Requested video size (%dx%d) is not supported", width, height);
        return BAD_VALUE;
    } else {  // width == -1 && height == -1
        // Do not configure the camera.
        // Use the current width and height value setting from the camera.
    }

    if (frameRate != -1) {
        CHECK(frameRate > 0 && frameRate <= 120);
        const char* supportedFrameRates =
                params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES);
        CHECK(supportedFrameRates != NULL);
        ALOGV("Supported frame rates: %s", supportedFrameRates);
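        // The advertised rates form a comma-separated list; the requested
        // rate is matched by a simple substring search below.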
        char buf[4];
        snprintf(buf, 4, "%d", frameRate);
        if (strstr(supportedFrameRates, buf) == NULL) {
            ALOGE("Requested frame rate (%d) is not supported: %s",
                frameRate, supportedFrameRates);
            return BAD_VALUE;
        }

        // The frame rate is supported, set the camera to the requested value.
        params->setPreviewFrameRate(frameRate);
        isCameraParamChanged = true;
    } else {  // frameRate == -1
        // Do not configure the camera.
        // Use the current frame rate value setting from the camera
    }

    if (isCameraParamChanged) {
        // Either frame rate or frame size needs to be changed.
        String8 s = params->flatten();
        if (OK != mCamera->setParameters(s)) {
            ALOGE("Could not change settings."
                  " Someone else is using camera %p?", mCamera.get());
            return -EBUSY;
        }
    }
    return OK;
}

/*
 * Check whether the requested video frame size
 * has been successfully configured or not. If both width and height
 * are -1, a check on the current width and height value setting
 * is performed.
 *
 * @param params CameraParameters to retrieve the information
 * @param width the target video frame width in pixels to check against
 * @param height the target video frame height in pixels to check against
 * @return OK if no error
 */
status_t CameraSource::checkVideoSize(
        const CameraParameters& params,
        int32_t width, int32_t height) {

    ALOGV("checkVideoSize");
    // The actual video size is the same as the preview size
    // if the camera hal does not support separate video and
    // preview output. In this case, we retrieve the video
    // size from preview.
    int32_t frameWidthActual = -1;
    int32_t frameHeightActual = -1;
    Vector<Size> sizes;
    params.getSupportedVideoSizes(sizes);
    if (sizes.size() == 0) {
        // video size is the same as preview size
        params.getPreviewSize(&frameWidthActual, &frameHeightActual);
    } else {
        // video size may not be the same as preview
        params.getVideoSize(&frameWidthActual, &frameHeightActual);
    }
    if (frameWidthActual < 0 || frameHeightActual < 0) {
        ALOGE("Failed to retrieve video frame size (%dx%d)",
                frameWidthActual, frameHeightActual);
        return UNKNOWN_ERROR;
    }

    // Check the actual video frame size against the target/requested
    // video frame size.
    if (width != -1 && height != -1) {
        if (frameWidthActual != width || frameHeightActual != height) {
            ALOGE("Failed to set video frame size to %dx%d. "
                    "The actual video size is %dx%d ", width, height,
                    frameWidthActual, frameHeightActual);
            return UNKNOWN_ERROR;
        }
    }

    // Good now.
    mVideoSize.width = frameWidthActual;
    mVideoSize.height = frameHeightActual;
    return OK;
}

/*
 * Check whether the requested frame rate has been successfully configured
 * or not. If the target frameRate is -1, a check on the current frame rate
 * value setting is performed.
 *
 * @param params CameraParameters to retrieve the information
 * @param frameRate the target video frame rate to check against
 * @return OK if no error.
 */
status_t CameraSource::checkFrameRate(
        const CameraParameters& params,
        int32_t frameRate) {

    ALOGV("checkFrameRate");
    int32_t frameRateActual = params.getPreviewFrameRate();
    if (frameRateActual < 0) {
        ALOGE("Failed to retrieve preview frame rate (%d)", frameRateActual);
        return UNKNOWN_ERROR;
    }

    // Check the actual video frame rate against the target/requested
    // video frame rate.
    if (frameRate != -1 && (frameRateActual - frameRate) != 0) {
        ALOGE("Failed to set preview frame rate to %d fps. The actual "
                "frame rate is %d", frameRate, frameRateActual);
        return UNKNOWN_ERROR;
    }

    // Good now.
    mVideoFrameRate = frameRateActual;
    return OK;
}

/*
 * Initialize the CameraSource so that it becomes
 * ready for providing the video input streams as requested.
 * @param camera the camera object used for the video source
 * @param cameraId if camera == 0, use camera with this id
 *      as the video source
 * @param videoSize the target video frame size. If both
 *      width and height in videoSize is -1, use the current
 *      width and height settings by the camera
 * @param frameRate the target frame rate in frames per second.
 *      if it is -1, use the current camera frame rate setting.
 * @param storeMetaDataInVideoBuffers request to store meta
 *      data or real YUV data in video buffers. Request to
 *      store meta data in video buffers may not be honored
 *      if the source does not support this feature.
 *
 * @return OK if no error.
 */
status_t CameraSource::init(
        const sp<hardware::ICamera>& camera,
        const sp<ICameraRecordingProxy>& proxy,
        int32_t cameraId,
        const String16& clientName,
        uid_t clientUid,
        pid_t clientPid,
        Size videoSize,
        int32_t frameRate,
        bool storeMetaDataInVideoBuffers) {

    ALOGV("init");
    status_t err = OK;
    int64_t token = IPCThreadState::self()->clearCallingIdentity();
    err = initWithCameraAccess(camera, proxy, cameraId, clientName, clientUid, clientPid,
                               videoSize, frameRate,
                               storeMetaDataInVideoBuffers);
    IPCThreadState::self()->restoreCallingIdentity(token);
    return err;
}
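// Allocate one shared memory heap of bufferCount slots of |size| bytes each
// and carve it into individual MemoryBase slices. The slices are later used
// to pass per-frame metadata (VideoNativeMetadata or VideoNativeHandleMetadata)
// to the encoder.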
void CameraSource::createVideoBufferMemoryHeap(size_t size, uint32_t bufferCount) {
    mMemoryHeapBase = new MemoryHeapBase(size * bufferCount, 0,
            "StageFright-CameraSource-BufferHeap");
    for (uint32_t i = 0; i < bufferCount; i++) {
        mMemoryBases.push_back(new MemoryBase(mMemoryHeapBase, i * size, size));
    }
}
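// Set up the BufferQueue path used in VIDEO_BUFFER_MODE_BUFFER_QUEUE: create
// a producer/consumer pair, configure the consumer's default size, format and
// data space, hand the producer end to the camera as its video target,
// allocate the metadata heap, and start the listener thread that drains
// incoming buffers.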
status_t CameraSource::initBufferQueue(uint32_t width, uint32_t height,
        uint32_t format, android_dataspace dataSpace, uint32_t bufferCount) {
    ALOGV("initBufferQueue");

    if (mVideoBufferConsumer != nullptr || mVideoBufferProducer != nullptr) {
        ALOGE("%s: Buffer queue already exists", __FUNCTION__);
        return ALREADY_EXISTS;
    }

    // Create a buffer queue.
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);

    uint32_t usage = GRALLOC_USAGE_SW_READ_OFTEN;
    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
        usage = GRALLOC_USAGE_HW_VIDEO_ENCODER;
    }

    bufferCount += kConsumerBufferCount;

    mVideoBufferConsumer = new BufferItemConsumer(consumer, usage, bufferCount);
    mVideoBufferConsumer->setName(String8::format("StageFright-CameraSource"));
    mVideoBufferProducer = producer;

    status_t res = mVideoBufferConsumer->setDefaultBufferSize(width, height);
    if (res != OK) {
        ALOGE("%s: Could not set buffer dimensions %dx%d: %s (%d)", __FUNCTION__, width, height,
                strerror(-res), res);
        return res;
    }

    res = mVideoBufferConsumer->setDefaultBufferFormat(format);
    if (res != OK) {
        ALOGE("%s: Could not set buffer format %d: %s (%d)", __FUNCTION__, format,
                strerror(-res), res);
        return res;
    }

    res = mVideoBufferConsumer->setDefaultBufferDataSpace(dataSpace);
    if (res != OK) {
        ALOGE("%s: Could not set data space %d: %s (%d)", __FUNCTION__, dataSpace,
                strerror(-res), res);
        return res;
    }

    res = mCamera->setVideoTarget(mVideoBufferProducer);
    if (res != OK) {
        ALOGE("%s: Failed to set video target: %s (%d)", __FUNCTION__, strerror(-res), res);
        return res;
    }

    // Create memory heap to store buffers as VideoNativeMetadata.
    createVideoBufferMemoryHeap(sizeof(VideoNativeMetadata), bufferCount);

    mBufferQueueListener = new BufferQueueListener(mVideoBufferConsumer, this);
    res = mBufferQueueListener->run("CameraSource-BufferQueueListener");
    if (res != OK) {
        ALOGE("%s: Could not run buffer queue listener thread: %s (%d)", __FUNCTION__,
                strerror(-res), res);
        return res;
    }

    return OK;
}
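// Body of init(), run with the caller's binder identity cleared: acquire the
// camera, validate the color format, video size and frame rate, set the
// preview target, negotiate the video buffer mode, and fill in the output
// format (mMeta).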
status_t CameraSource::initWithCameraAccess(
        const sp<hardware::ICamera>& camera,
        const sp<ICameraRecordingProxy>& proxy,
        int32_t cameraId,
        const String16& clientName,
        uid_t clientUid,
        pid_t clientPid,
        Size videoSize,
        int32_t frameRate,
        bool storeMetaDataInVideoBuffers) {
    ALOGV("initWithCameraAccess");
    status_t err = OK;

    if ((err = isCameraAvailable(camera, proxy, cameraId,
            clientName, clientUid, clientPid)) != OK) {
        ALOGE("Camera connection could not be established.");
        return err;
    }
    CameraParameters params(mCamera->getParameters());
    if ((err = isCameraColorFormatSupported(params)) != OK) {
        return err;
    }

    // Set the camera to use the requested video frame size
    // and/or frame rate.
    if ((err = configureCamera(&params,
                    videoSize.width, videoSize.height,
                    frameRate))) {
        return err;
    }

    // Check on video frame size and frame rate.
    CameraParameters newCameraParams(mCamera->getParameters());
    if ((err = checkVideoSize(newCameraParams,
                videoSize.width, videoSize.height)) != OK) {
        return err;
    }
    if ((err = checkFrameRate(newCameraParams, frameRate)) != OK) {
        return err;
    }

    // Set the preview display. Skip this if mSurface is null because
    // applications may already set a surface to the camera.
    if (mSurface != NULL) {
        // This CHECK is good, since we just passed the lock/unlock
        // check earlier by calling mCamera->setParameters().
        CHECK_EQ((status_t)OK, mCamera->setPreviewTarget(mSurface));
    }

    // By default, store real data in video buffers.
    mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
    if (storeMetaDataInVideoBuffers) {
        if (OK == mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE)) {
            mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE;
        } else if (OK == mCamera->setVideoBufferMode(
                hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA)) {
            mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA;
        }
    }

    if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV) {
        err = mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV);
        if (err != OK) {
            ALOGE("%s: Setting video buffer mode to VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV failed: "
                    "%s (err=%d)", __FUNCTION__, strerror(-err), err);
            return err;
        }
    }

    int64_t glitchDurationUs = (1000000LL / mVideoFrameRate);
    if (glitchDurationUs > mGlitchDurationThresholdUs) {
        mGlitchDurationThresholdUs = glitchDurationUs;
    }

    // XXX: query camera for the stride and slice height
    // when the capability becomes available.
    mMeta = new MetaData;
    mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_RAW);
    mMeta->setInt32(kKeyColorFormat, mColorFormat);
    mMeta->setInt32(kKeyWidth, mVideoSize.width);
    mMeta->setInt32(kKeyHeight, mVideoSize.height);
    mMeta->setInt32(kKeyStride, mVideoSize.width);
    mMeta->setInt32(kKeySliceHeight, mVideoSize.height);
    mMeta->setInt32(kKeyFrameRate, mVideoFrameRate);
    return OK;
}

CameraSource::~CameraSource() {
    if (mStarted) {
        reset();
    } else if (mInitCheck == OK) {
        // Camera is initialized but because start() is never called,
        // the lock on Camera is never released. This makes sure
        // Camera's lock is released in this case.
        releaseCamera();
    }
}

status_t CameraSource::startCameraRecording() {
    ALOGV("startCameraRecording");
    // Reset the identity to the current thread because media server owns the
    // camera and recording is started by the applications. The applications
    // will connect to the camera in ICameraRecordingProxy::startRecording.
    int64_t token = IPCThreadState::self()->clearCallingIdentity();
    status_t err;

    if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
        // Initialize buffer queue.
        err = initBufferQueue(mVideoSize.width, mVideoSize.height, mEncoderFormat,
                (android_dataspace_t)mEncoderDataSpace,
                mNumInputBuffers > 0 ? mNumInputBuffers : 1);
        if (err != OK) {
            ALOGE("%s: Failed to initialize buffer queue: %s (err=%d)", __FUNCTION__,
                    strerror(-err), err);
            return err;
        }
    } else {
        if (mNumInputBuffers > 0) {
            err = mCamera->sendCommand(
                CAMERA_CMD_SET_VIDEO_BUFFER_COUNT, mNumInputBuffers, 0);

            // This could happen for CameraHAL1 clients; thus the failure is
            // not a fatal error
            if (err != OK) {
                ALOGW("Failed to set video buffer count to %d due to %d",
                    mNumInputBuffers, err);
            }
        }

        err = mCamera->sendCommand(
            CAMERA_CMD_SET_VIDEO_FORMAT, mEncoderFormat, mEncoderDataSpace);

        // This could happen for CameraHAL1 clients; thus the failure is
        // not a fatal error
        if (err != OK) {
            ALOGW("Failed to set video encoder format/dataspace to %d, %d due to %d",
                    mEncoderFormat, mEncoderDataSpace, err);
        }

        // Create memory heap to store buffers as VideoNativeHandleMetadata.
        createVideoBufferMemoryHeap(sizeof(VideoNativeHandleMetadata), kDefaultVideoBufferCount);
    }

    err = OK;
    if (mCameraFlags & FLAGS_HOT_CAMERA) {
        mCamera->unlock();
        mCamera.clear();
        if ((err = mCameraRecordingProxy->startRecording(
                new ProxyListener(this))) != OK) {
            ALOGE("Failed to start recording, received error: %s (%d)",
                    strerror(-err), err);
        }
    } else {
        mCamera->setListener(new CameraSourceListener(this));
        mCamera->startRecording();
        if (!mCamera->recordingEnabled()) {
            err = -EINVAL;
            ALOGE("Failed to start recording");
        }
    }
    IPCThreadState::self()->restoreCallingIdentity(token);
    return err;
}

status_t CameraSource::start(MetaData *meta) {
    ALOGV("start");
    CHECK(!mStarted);
    if (mInitCheck != OK) {
        ALOGE("CameraSource is not initialized yet");
        return mInitCheck;
    }

    if (property_get_bool("media.stagefright.record-stats", false)) {
        mCollectStats = true;
    }

    mStartTimeUs = 0;
    mNumInputBuffers = 0;
    mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
    mEncoderDataSpace = HAL_DATASPACE_V0_BT709;

    if (meta) {
        int64_t startTimeUs;
        if (meta->findInt64(kKeyTime, &startTimeUs)) {
            mStartTimeUs = startTimeUs;
        }

        int32_t nBuffers;
        if (meta->findInt32(kKeyNumBuffers, &nBuffers)) {
            CHECK_GT(nBuffers, 0);
            mNumInputBuffers = nBuffers;
        }

        // apply encoder color format if specified
        if (meta->findInt32(kKeyPixelFormat, &mEncoderFormat)) {
            ALOGI("Using encoder format: %#x", mEncoderFormat);
        }
        if (meta->findInt32(kKeyColorSpace, &mEncoderDataSpace)) {
            ALOGI("Using encoder data space: %#x", mEncoderDataSpace);
        }
    }

    status_t err;
    if ((err = startCameraRecording()) == OK) {
        mStarted = true;
    }

    return err;
}

void CameraSource::stopCameraRecording() {
    ALOGV("stopCameraRecording");
    if (mCameraFlags & FLAGS_HOT_CAMERA) {
        if (mCameraRecordingProxy != 0) {
            mCameraRecordingProxy->stopRecording();
        }
    } else {
        if (mCamera != 0) {
            mCamera->setListener(NULL);
            mCamera->stopRecording();
        }
    }
}

void CameraSource::releaseCamera() {
    ALOGV("releaseCamera");
    sp<Camera> camera;
    bool coldCamera = false;
    {
        Mutex::Autolock autoLock(mLock);
        // get a local ref and clear ref to mCamera now
        camera = mCamera;
        mCamera.clear();
        coldCamera = (mCameraFlags & FLAGS_HOT_CAMERA) == 0;
    }

    if (camera != 0) {
        int64_t token = IPCThreadState::self()->clearCallingIdentity();
        if (coldCamera) {
            ALOGV("Camera was cold when we started, stopping preview");
            camera->stopPreview();
            camera->disconnect();
        }
        camera->unlock();
        IPCThreadState::self()->restoreCallingIdentity(token);
    }

    {
        Mutex::Autolock autoLock(mLock);
        if (mCameraRecordingProxy != 0) {
            IInterface::asBinder(mCameraRecordingProxy)->unlinkToDeath(mDeathNotifier);
            mCameraRecordingProxy.clear();
        }
        mCameraFlags = 0;
    }
}
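// Stop the source: mark it stopped, drop any queued frames, wait for frames
// still out with the encoder to be returned, stop the camera recording, tear
// down the buffer queue listener, and release the camera.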
status_t CameraSource::reset() {
    ALOGD("reset: E");

    {
        Mutex::Autolock autoLock(mLock);
        mStarted = false;
        mEos = false;
        mStopSystemTimeUs = -1;
        mFrameAvailableCondition.signal();

        int64_t token;
        bool isTokenValid = false;
        if (mCamera != 0) {
            token = IPCThreadState::self()->clearCallingIdentity();
            isTokenValid = true;
        }
        releaseQueuedFrames();
        while (!mFramesBeingEncoded.empty()) {
            if (NO_ERROR !=
                mFrameCompleteCondition.waitRelative(mLock,
                        mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
                ALOGW("Timed out waiting for outstanding frames being encoded: %zu",
                    mFramesBeingEncoded.size());
            }
        }
        stopCameraRecording();
        if (isTokenValid) {
            IPCThreadState::self()->restoreCallingIdentity(token);
        }

        if (mCollectStats) {
            ALOGI("Frames received/encoded/dropped: %d/%d/%d in %" PRId64 " us",
                    mNumFramesReceived, mNumFramesEncoded, mNumFramesDropped,
                    mLastFrameTimestampUs - mFirstFrameTimeUs);
        }

        if (mNumGlitches > 0) {
            ALOGW("%d long delays between neighboring video frames", mNumGlitches);
        }

        CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped);
    }

    if (mBufferQueueListener != nullptr) {
        mBufferQueueListener->requestExit();
        mBufferQueueListener->join();
        mBufferQueueListener.clear();
    }

    mVideoBufferConsumer.clear();
    mVideoBufferProducer.clear();
    releaseCamera();

    ALOGD("reset: X");
    return OK;
}

void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
    ALOGV("releaseRecordingFrame");

    if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
        // Return the buffer to buffer queue in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
        ssize_t offset;
        size_t size;
        sp<IMemoryHeap> heap = frame->getMemory(&offset, &size);
        if (heap->getHeapID() != mMemoryHeapBase->getHeapID()) {
            ALOGE("%s: Mismatched heap ID, ignoring release (got %x, expected %x)", __FUNCTION__,
                    heap->getHeapID(), mMemoryHeapBase->getHeapID());
            return;
        }

        VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
                (uint8_t*)heap->getBase() + offset);

        // Find the corresponding buffer item for the native window buffer.
        ssize_t index = mReceivedBufferItemMap.indexOfKey(payload->pBuffer);
        if (index == NAME_NOT_FOUND) {
            ALOGE("%s: Couldn't find buffer item for %p", __FUNCTION__, payload->pBuffer);
            return;
        }

        BufferItem buffer = mReceivedBufferItemMap.valueAt(index);
        mReceivedBufferItemMap.removeItemsAt(index);
        mVideoBufferConsumer->releaseBuffer(buffer);
        mMemoryBases.push_back(frame);
        mMemoryBaseAvailableCond.signal();
    } else {
        native_handle_t* handle = nullptr;

        // Check if frame contains a VideoNativeHandleMetadata.
        if (frame->size() == sizeof(VideoNativeHandleMetadata)) {
            VideoNativeHandleMetadata *metadata =
                (VideoNativeHandleMetadata*)(frame->pointer());
            if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
                handle = metadata->pHandle;
            }
        }

        if (handle != nullptr) {
            ssize_t offset;
            size_t size;
            sp<IMemoryHeap> heap = frame->getMemory(&offset, &size);
            if (heap->getHeapID() != mMemoryHeapBase->getHeapID()) {
                ALOGE("%s: Mismatched heap ID, ignoring release (got %x, expected %x)",
                        __FUNCTION__, heap->getHeapID(), mMemoryHeapBase->getHeapID());
                return;
            }
            uint32_t batchSize = 0;
            {
                Mutex::Autolock autoLock(mBatchLock);
                if (mInflightBatchSizes.size() > 0) {
                    batchSize = mInflightBatchSizes[0];
                }
            }
            if (batchSize == 0) { // return buffers one by one
                // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
                releaseRecordingFrameHandle(handle);
                mMemoryBases.push_back(frame);
                mMemoryBaseAvailableCond.signal();
            } else { // Group buffers in batch then return
                Mutex::Autolock autoLock(mBatchLock);
                mInflightReturnedHandles.push_back(handle);
                mInflightReturnedMemorys.push_back(frame);
                if (mInflightReturnedHandles.size() == batchSize) {
                    releaseRecordingFrameHandleBatch(mInflightReturnedHandles);

                    mInflightBatchSizes.pop_front();
                    mInflightReturnedHandles.clear();
                    for (const auto& mem : mInflightReturnedMemorys) {
                        mMemoryBases.push_back(mem);
                        mMemoryBaseAvailableCond.signal();
                    }
                    mInflightReturnedMemorys.clear();
                }
            }

        } else if (mCameraRecordingProxy != nullptr) {
            // mCamera is created by application. Return the frame back to camera via camera
            // recording proxy.
            mCameraRecordingProxy->releaseRecordingFrame(frame);
        } else if (mCamera != nullptr) {
            // mCamera is created by CameraSource. Return the frame directly back to camera.
            int64_t token = IPCThreadState::self()->clearCallingIdentity();
            mCamera->releaseRecordingFrame(frame);
            IPCThreadState::self()->restoreCallingIdentity(token);
        }
    }
}

void CameraSource::releaseQueuedFrames() {
    List<sp<IMemory> >::iterator it;
    while (!mFramesReceived.empty()) {
        it = mFramesReceived.begin();
        releaseRecordingFrame(*it);
        mFramesReceived.erase(it);
        ++mNumFramesDropped;
    }
}

sp<MetaData> CameraSource::getFormat() {
    return mMeta;
}

void CameraSource::releaseOneRecordingFrame(const sp<IMemory>& frame) {
    releaseRecordingFrame(frame);
}

void CameraSource::signalBufferReturned(MediaBufferBase *buffer) {
    ALOGV("signalBufferReturned: %p", buffer->data());
    Mutex::Autolock autoLock(mLock);
    for (List<sp<IMemory> >::iterator it = mFramesBeingEncoded.begin();
         it != mFramesBeingEncoded.end(); ++it) {
        if ((*it)->pointer() == buffer->data()) {
            releaseOneRecordingFrame((*it));
            mFramesBeingEncoded.erase(it);
            ++mNumFramesEncoded;
            buffer->setObserver(0);
            buffer->release();
            mFrameCompleteCondition.signal();
            return;
        }
    }
    CHECK(!"signalBufferReturned: bogus buffer");
}

status_t CameraSource::read(
        MediaBufferBase **buffer, const ReadOptions *options) {
    ALOGV("read");

    *buffer = NULL;

    int64_t seekTimeUs;
    ReadOptions::SeekMode mode;
    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        return ERROR_UNSUPPORTED;
    }

    sp<IMemory> frame;
    int64_t frameTime;

    {
        Mutex::Autolock autoLock(mLock);
        while (mStarted && !mEos && mFramesReceived.empty()) {
            if (NO_ERROR !=
                mFrameAvailableCondition.waitRelative(mLock,
                    mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
                if (mCameraRecordingProxy != 0 &&
                    !IInterface::asBinder(mCameraRecordingProxy)->isBinderAlive()) {
                    ALOGW("camera recording proxy is gone");
                    return ERROR_END_OF_STREAM;
                }
                ALOGW("Timed out waiting for incoming camera video frames: %" PRId64 " us",
                    mLastFrameTimestampUs);
            }
        }
        if (!mStarted) {
            return OK;
        }
        if (mFramesReceived.empty()) {
            return ERROR_END_OF_STREAM;
        }
        frame = *mFramesReceived.begin();
        mFramesReceived.erase(mFramesReceived.begin());

        frameTime = *mFrameTimes.begin();
        mFrameTimes.erase(mFrameTimes.begin());
        mFramesBeingEncoded.push_back(frame);
        *buffer = new MediaBuffer(frame->pointer(), frame->size());
        (*buffer)->setObserver(this);
        (*buffer)->add_ref();
        (*buffer)->meta_data().setInt64(kKeyTime, frameTime);
    }
    return OK;
}

status_t CameraSource::setStopTimeUs(int64_t stopTimeUs) {
    Mutex::Autolock autoLock(mLock);
    ALOGV("Set stoptime: %lld us", (long long)stopTimeUs);

    if (stopTimeUs < -1) {
        ALOGE("Invalid stop time %lld us", (long long)stopTimeUs);
        return BAD_VALUE;
    } else if (stopTimeUs == -1) {
        ALOGI("reset stopTime to be -1");
    }

    mStopSystemTimeUs = stopTimeUs;
    return OK;
}
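// Decide, with mLock held, whether an incoming frame should be dropped:
// frames before the requested start time or after the stop time, frames
// skipped by a subclass (time lapse), and frames with non-monotonic
// timestamps are all rejected. Also counts glitches and, on the first
// accepted frame, converts mStartTimeUs into the initial delay relative to
// the first frame.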
bool CameraSource::shouldSkipFrameLocked(int64_t timestampUs) {
    if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
        ALOGV("Drop frame at %lld/%lld us", (long long)timestampUs, (long long)mStartTimeUs);
        return true;
    }

    if (mStopSystemTimeUs != -1 && timestampUs >= mStopSystemTimeUs) {
        ALOGV("Drop Camera frame at %lld stop time: %lld us",
                (long long)timestampUs, (long long)mStopSystemTimeUs);
        mEos = true;
        mFrameAvailableCondition.signal();
        return true;
    }

    // May need to skip frame or modify timestamp. Currently implemented
    // by the subclass CameraSourceTimeLapse.
    if (skipCurrentFrame(timestampUs)) {
        return true;
    }

    if (mNumFramesReceived > 0) {
        if (timestampUs <= mLastFrameTimestampUs) {
            ALOGW("Dropping frame with backward timestamp %lld (last %lld)",
                    (long long)timestampUs, (long long)mLastFrameTimestampUs);
            return true;
        }
        if (timestampUs - mLastFrameTimestampUs > mGlitchDurationThresholdUs) {
            ++mNumGlitches;
        }
    }

    mLastFrameTimestampUs = timestampUs;
    if (mNumFramesReceived == 0) {
        mFirstFrameTimeUs = timestampUs;
        // Initial delay
        if (mStartTimeUs > 0) {
            if (timestampUs < mStartTimeUs) {
                // Frame was captured before recording was started
                // Drop it without updating the statistical data.
                return true;
            }
            mStartTimeUs = timestampUs - mStartTimeUs;
        }
    }

    return false;
}
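// Media timestamps in the callbacks below are computed as
//     timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs)
// where, after the first frame, mStartTimeUs holds the initial delay. A
// worked example: if recording starts at 100000 us and the first frame
// arrives at 130000 us, the initial delay is 30000 us, so a frame captured
// at 200000 us is stamped 30000 + (200000 - 130000) = 100000 us.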
void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
        int32_t msgType __unused, const sp<IMemory> &data) {
    ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
    Mutex::Autolock autoLock(mLock);

    if (shouldSkipFrameLocked(timestampUs)) {
        releaseOneRecordingFrame(data);
        return;
    }

    ++mNumFramesReceived;

    CHECK(data != NULL && data->size() > 0);
    mFramesReceived.push_back(data);
    int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
    mFrameTimes.push_back(timeUs);
    ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
        mStartTimeUs, timeUs);
    mFrameAvailableCondition.signal();
}

void CameraSource::releaseRecordingFrameHandle(native_handle_t* handle) {
    if (mCameraRecordingProxy != nullptr) {
        mCameraRecordingProxy->releaseRecordingFrameHandle(handle);
    } else if (mCamera != nullptr) {
        int64_t token = IPCThreadState::self()->clearCallingIdentity();
        mCamera->releaseRecordingFrameHandle(handle);
        IPCThreadState::self()->restoreCallingIdentity(token);
    } else {
        native_handle_close(handle);
        native_handle_delete(handle);
    }
}

void CameraSource::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
    if (mCameraRecordingProxy != nullptr) {
        mCameraRecordingProxy->releaseRecordingFrameHandleBatch(handles);
    } else if (mCamera != nullptr) {
        int64_t token = IPCThreadState::self()->clearCallingIdentity();
        mCamera->releaseRecordingFrameHandleBatch(handles);
        IPCThreadState::self()->restoreCallingIdentity(token);
    } else {
        for (auto& handle : handles) {
            native_handle_close(handle);
            native_handle_delete(handle);
        }
    }
}

void CameraSource::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
        native_handle_t* handle) {
    ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
    Mutex::Autolock autoLock(mLock);
    if (handle == nullptr) return;

    if (shouldSkipFrameLocked(timestampUs)) {
        releaseRecordingFrameHandle(handle);
        return;
    }

    while (mMemoryBases.empty()) {
        if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
                TIMED_OUT) {
            ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
            releaseRecordingFrameHandle(handle);
            return;
        }
    }

    ++mNumFramesReceived;

    sp<IMemory> data = *mMemoryBases.begin();
    mMemoryBases.erase(mMemoryBases.begin());

    // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
    VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
    metadata->eType = kMetadataBufferTypeNativeHandleSource;
    metadata->pHandle = handle;

    mFramesReceived.push_back(data);
    int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
    mFrameTimes.push_back(timeUs);
    ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
    mFrameAvailableCondition.signal();
}

void CameraSource::recordingFrameHandleCallbackTimestampBatch(
        const std::vector<int64_t>& timestampsUs,
        const std::vector<native_handle_t*>& handles) {
    size_t n = timestampsUs.size();
    if (n != handles.size()) {
        ALOGE("%s: timestampsUs(%zu) and handles(%zu) size mismatch!",
                __FUNCTION__, timestampsUs.size(), handles.size());
    }

    Mutex::Autolock autoLock(mLock);
    int batchSize = 0;
    for (size_t i = 0; i < n; i++) {
        int64_t timestampUs = timestampsUs[i];
        native_handle_t* handle = handles[i];

        ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
        if (handle == nullptr) continue;

        if (shouldSkipFrameLocked(timestampUs)) {
            releaseRecordingFrameHandle(handle);
            continue;
        }

        while (mMemoryBases.empty()) {
            if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
                    TIMED_OUT) {
                ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
                releaseRecordingFrameHandle(handle);
                continue;
            }
        }
        ++batchSize;
        ++mNumFramesReceived;
        sp<IMemory> data = *mMemoryBases.begin();
        mMemoryBases.erase(mMemoryBases.begin());

        // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
        VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
        metadata->eType = kMetadataBufferTypeNativeHandleSource;
        metadata->pHandle = handle;

        mFramesReceived.push_back(data);
        int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
        mFrameTimes.push_back(timeUs);
        ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
    }
    if (batchSize > 0) {
        Mutex::Autolock autoLock(mBatchLock);
        mInflightBatchSizes.push_back(batchSize);
    }
    for (int i = 0; i < batchSize; i++) {
        mFrameAvailableCondition.signal();
    }
}

CameraSource::BufferQueueListener::BufferQueueListener(const sp<BufferItemConsumer>& consumer,
        const sp<CameraSource>& cameraSource) {
    mConsumer = consumer;
    mConsumer->setFrameAvailableListener(this);
    mCameraSource = cameraSource;
}

void CameraSource::BufferQueueListener::onFrameAvailable(const BufferItem& /*item*/) {
    ALOGV("%s: onFrameAvailable", __FUNCTION__);

    Mutex::Autolock l(mLock);

    if (!mFrameAvailable) {
        mFrameAvailable = true;
        mFrameAvailableSignal.signal();
    }
}

bool CameraSource::BufferQueueListener::threadLoop() {
    if (mConsumer == nullptr || mCameraSource == nullptr) {
        return false;
    }

    {
        Mutex::Autolock l(mLock);
        while (!mFrameAvailable) {
            if (mFrameAvailableSignal.waitRelative(mLock, kFrameAvailableTimeout) == TIMED_OUT) {
                return true;
            }
        }
        mFrameAvailable = false;
    }

    BufferItem buffer;
    while (mConsumer->acquireBuffer(&buffer, 0) == OK) {
        mCameraSource->processBufferQueueFrame(buffer);
    }

    return true;
}
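// Called on the BufferQueueListener thread for every acquired BufferItem:
// drop it if shouldSkipFrameLocked() says so, otherwise wrap the native
// window buffer in a VideoNativeMetadata slice, remember the BufferItem so it
// can be released when the encoder returns the buffer, and queue it for read().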
void CameraSource::processBufferQueueFrame(BufferItem& buffer) {
    Mutex::Autolock autoLock(mLock);

    int64_t timestampUs = buffer.mTimestamp / 1000;
    if (shouldSkipFrameLocked(timestampUs)) {
        mVideoBufferConsumer->releaseBuffer(buffer);
        return;
    }

    while (mMemoryBases.empty()) {
        if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
                TIMED_OUT) {
            ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
            mVideoBufferConsumer->releaseBuffer(buffer);
            return;
        }
    }

    ++mNumFramesReceived;

    // Find an available memory slot to store the buffer as VideoNativeMetadata.
    sp<IMemory> data = *mMemoryBases.begin();
    mMemoryBases.erase(mMemoryBases.begin());

    ssize_t offset;
    size_t size;
    sp<IMemoryHeap> heap = data->getMemory(&offset, &size);
    VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
        (uint8_t*)heap->getBase() + offset);
    memset(payload, 0, sizeof(VideoNativeMetadata));
    payload->eType = kMetadataBufferTypeANWBuffer;
    payload->pBuffer = buffer.mGraphicBuffer->getNativeBuffer();
    payload->nFenceFd = -1;

    // Add the mapping so we can find the corresponding buffer item to release to the buffer queue
    // when the encoder returns the native window buffer.
    mReceivedBufferItemMap.add(payload->pBuffer, buffer);

    mFramesReceived.push_back(data);
    int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
    mFrameTimes.push_back(timeUs);
    ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
        mStartTimeUs, timeUs);
    mFrameAvailableCondition.signal();
}

MetadataBufferType CameraSource::metaDataStoredInVideoBuffers() const {
    ALOGV("metaDataStoredInVideoBuffers");

    // Output buffers will contain metadata if camera sends us buffer in metadata mode or via
    // buffer queue.
    switch (mVideoBufferMode) {
        case hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA:
            return kMetadataBufferTypeNativeHandleSource;
        case hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE:
            return kMetadataBufferTypeANWBuffer;
        default:
            return kMetadataBufferTypeInvalid;
    }
}

CameraSource::ProxyListener::ProxyListener(const sp<CameraSource>& source) {
    mSource = source;
}

void CameraSource::ProxyListener::dataCallbackTimestamp(
        nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) {
    mSource->dataCallbackTimestamp(timestamp / 1000, msgType, dataPtr);
}

void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
        native_handle_t* handle) {
    mSource->recordingFrameHandleCallbackTimestamp(timestamp / 1000, handle);
}

void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestampBatch(
        const std::vector<int64_t>& timestampsUs,
        const std::vector<native_handle_t*>& handles) {
    int n = timestampsUs.size();
    std::vector<nsecs_t> modifiedTimestamps(n);
    for (int i = 0; i < n; i++) {
        modifiedTimestamps[i] = timestampsUs[i] / 1000;
    }
    mSource->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
}

void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
    ALOGI("Camera recording proxy died");
}

}  // namespace android