/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
//#define LOG_NNDEBUG 0  // extra-verbose logging; enables ALOGVV below

#define LOG_TAG "EmulatedCamera2_Sensor"

#ifdef LOG_NNDEBUG
#define ALOGVV(...) ALOGV(__VA_ARGS__)
#else
#define ALOGVV(...) ((void)0)
#endif

#include <utils/Log.h>

#include <cmath>
#include <cstdlib>
#include <cstring>
#include "../EmulatedFakeCamera2.h"
#include "Sensor.h"
#include "system/camera_metadata.h"

namespace android {

// const nsecs_t Sensor::kExposureTimeRange[2] =
//    {1000L, 30000000000L} ; // 1 us - 30 sec
// const nsecs_t Sensor::kFrameDurationRange[2] =
//    {33331760L, 30000000000L}; // ~1/30 s - 30 sec
const nsecs_t Sensor::kExposureTimeRange[2] = {1000L,
                                               300000000L};  // 1 us - 0.3 sec
const nsecs_t Sensor::kFrameDurationRange[2] = {
    33331760L, 300000000L};  // ~1/30 s - 0.3 sec

const nsecs_t Sensor::kMinVerticalBlank = 10000L;

const uint8_t Sensor::kColorFilterArrangement =
    ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGGB;

// Output image data characteristics
const uint32_t Sensor::kMaxRawValue = 4000;
const uint32_t Sensor::kBlackLevel = 1000;

// Sensor sensitivity
const float Sensor::kSaturationVoltage = 0.520f;
const uint32_t Sensor::kSaturationElectrons = 2000;
const float Sensor::kVoltsPerLuxSecond = 0.100f;

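// Derived conversion from scene illuminance to collected photocharge; with
// the values above this works out to 2000 / 0.520 * 0.100 ~= 385
// electrons per lux-second.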
const float Sensor::kElectronsPerLuxSecond = Sensor::kSaturationElectrons /
                                             Sensor::kSaturationVoltage *
                                             Sensor::kVoltsPerLuxSecond;

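// Digital counts per electron at the base sensitivity (ISO 100):
// 4000 / 2000 = 2.0 counts per electron before any extra gain is applied.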
const float Sensor::kBaseGainFactor =
    (float)Sensor::kMaxRawValue / Sensor::kSaturationElectrons;

const float Sensor::kReadNoiseStddevBeforeGain = 1.177f;  // in electrons
const float Sensor::kReadNoiseStddevAfterGain = 2.100f;   // in digital counts
const float Sensor::kReadNoiseVarBeforeGain =
    Sensor::kReadNoiseStddevBeforeGain * Sensor::kReadNoiseStddevBeforeGain;
const float Sensor::kReadNoiseVarAfterGain =
    Sensor::kReadNoiseStddevAfterGain * Sensor::kReadNoiseStddevAfterGain;

const int32_t Sensor::kSensitivityRange[2] = {100, 1600};
const uint32_t Sensor::kDefaultSensitivity = 100;

/** A few utility functions for math, normal distributions */

// Take advantage of IEEE floating-point format to calculate an approximate
// square root. Accurate to within +-3.6%
float sqrtf_approx(float r) {
  // Modifier is based on IEEE floating-point representation; the
  // manipulations boil down to finding approximate log2, dividing by two, and
  // then inverting the log2. A bias is added to make the relative error
  // symmetric about the real answer.
  const int32_t modifier = 0x1FBB4000;
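  // (Sketch of where the constant comes from: for an IEEE 754 float, the raw
  // bits approximate 2^23 * (log2(r) + 127). Halving the bits halves log2(r)
  // but also halves the 127 exponent bias, so 63.5 * 2^23 = 0x1FC00000 must
  // be added back; the value used here is that, minus a small 0x4C000 tweak
  // that centers the relative error about the true root.)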

  int32_t r_i;
  memcpy(&r_i, &r, sizeof(r_i));  // bit-copy via memcpy rather than a
                                  // pointer cast, to avoid aliasing issues
  r_i = (r_i >> 1) + modifier;

  float result;
  memcpy(&result, &r_i, sizeof(result));
  return result;
}

Sensor::Sensor(uint32_t width, uint32_t height)
    : Thread(false),
      mResolution{width, height},
      mActiveArray{0, 0, width, height},
      mRowReadoutTime(kFrameDurationRange[0] / height),
      mGotVSync(false),
      mExposureTime(kFrameDurationRange[0] - kMinVerticalBlank),
      mFrameDuration(kFrameDurationRange[0]),
      mGainFactor(kDefaultSensitivity),
      mNextBuffers(NULL),
      mFrameNumber(0),
      mCapturedBuffers(NULL),
      mListener(NULL),
      mScene(width, height, kElectronsPerLuxSecond) {
  ALOGV("Sensor created with pixel array %d x %d", width, height);
}

Sensor::~Sensor() { shutDown(); }

status_t Sensor::startUp() {
  ALOGV("%s: E", __FUNCTION__);

  int res;
  mCapturedBuffers = NULL;
  res = run("EmulatedFakeCamera2::Sensor", ANDROID_PRIORITY_URGENT_DISPLAY);

  if (res != OK) {
    ALOGE("Unable to start up sensor capture thread: %d", res);
  }
  return res;
}

status_t Sensor::shutDown() {
  ALOGV("%s: E", __FUNCTION__);

  int res;
  res = requestExitAndWait();
  if (res != OK) {
    ALOGE("Unable to shut down sensor capture thread: %d", res);
  }
  return res;
}

Scene &Sensor::getScene() { return mScene; }

void Sensor::setExposureTime(uint64_t ns) {
  Mutex::Autolock lock(mControlMutex);
  ALOGVV("Exposure set to %f", ns / 1000000.f);
  mExposureTime = ns;
}

void Sensor::setFrameDuration(uint64_t ns) {
  Mutex::Autolock lock(mControlMutex);
  ALOGVV("Frame duration set to %f", ns / 1000000.f);
  mFrameDuration = ns;
}

void Sensor::setSensitivity(uint32_t gain) {
  Mutex::Autolock lock(mControlMutex);
  ALOGVV("Gain set to %d", gain);
  mGainFactor = gain;
}

void Sensor::setDestinationBuffers(Buffers *buffers) {
  Mutex::Autolock lock(mControlMutex);
  mNextBuffers = buffers;
}

void Sensor::setFrameNumber(uint32_t frameNumber) {
  Mutex::Autolock lock(mControlMutex);
  mFrameNumber = frameNumber;
}

bool Sensor::waitForVSync(nsecs_t reltime) {
  int res;
  Mutex::Autolock lock(mControlMutex);

  mGotVSync = false;
  res = mVSync.waitRelative(mControlMutex, reltime);
  if (res != OK && res != TIMED_OUT) {
    ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
    return false;
  }
  return mGotVSync;
}

bool Sensor::waitForNewFrame(nsecs_t reltime, nsecs_t *captureTime) {
  Mutex::Autolock lock(mReadoutMutex);

  if (mCapturedBuffers == NULL) {
    int res;
    res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
    if (res == TIMED_OUT) {
      return false;
    } else if (res != OK || mCapturedBuffers == NULL) {
      ALOGE("Error waiting for sensor readout signal: %d", res);
      return false;
    }
  }
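  // Tell the capture thread the previous frame's buffers have been consumed,
  // so it may publish the next readout (see stage 3 of threadLoop()).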
  mReadoutComplete.signal();

  *captureTime = mCaptureTime;
  mCapturedBuffers = NULL;
  return true;
}

Sensor::SensorListener::~SensorListener() {}

void Sensor::setSensorListener(SensorListener *listener) {
  Mutex::Autolock lock(mControlMutex);
  mListener = listener;
}

status_t Sensor::readyToRun() {
  ALOGV("Starting up sensor thread");
  mStartupTime = systemTime();
  mNextCaptureTime = 0;
  mNextCapturedBuffers = NULL;
  return OK;
}


bool Sensor::threadLoop() {
  /**
   * Sensor capture operation main loop.
   *
   * Stages are out-of-order relative to a single frame's processing, but
   * in-order in time: each pass reads the latest controls (stage 1), reads
   * out the image captured on the previous pass (stage 3), then starts the
   * next capture (stage 2).
   */

  /**
   * Stage 1: Read in latest control parameters
   */
  uint64_t exposureDuration;
  uint64_t frameDuration;
  uint32_t gain;
  Buffers *nextBuffers;
  uint32_t frameNumber;
  SensorListener *listener = NULL;
  {
    Mutex::Autolock lock(mControlMutex);
    exposureDuration = mExposureTime;
    frameDuration = mFrameDuration;
    gain = mGainFactor;
    nextBuffers = mNextBuffers;
    frameNumber = mFrameNumber;
    listener = mListener;
    // Don't reuse a buffer set
    mNextBuffers = NULL;

    // Signal VSync for start of readout
    ALOGVV("Sensor VSync");
    mGotVSync = true;
    mVSync.signal();
  }

  /**
   * Stage 3: Read out latest captured image
   */

  Buffers *capturedBuffers = NULL;
  nsecs_t captureTime = 0;

  nsecs_t startRealTime = systemTime();
  // Stagefright cares about system time for timestamps, so base simulated
  // time on that.
  nsecs_t simulatedTime = startRealTime;
  nsecs_t frameEndRealTime = startRealTime + frameDuration;

  if (mNextCapturedBuffers != NULL) {
    ALOGVV("Sensor starting readout");
    // Pretend we're doing readout now; will signal once enough time has elapsed
    capturedBuffers = mNextCapturedBuffers;
    captureTime = mNextCaptureTime;
  }
  simulatedTime += mRowReadoutTime + kMinVerticalBlank;

  // TODO: Move this signal to another thread to simulate readout
  // time properly
  if (capturedBuffers != NULL) {
    ALOGVV("Sensor readout complete");
    Mutex::Autolock lock(mReadoutMutex);
    if (mCapturedBuffers != NULL) {
      ALOGV("Waiting for readout thread to catch up!");
      mReadoutComplete.wait(mReadoutMutex);
    }

    mCapturedBuffers = capturedBuffers;
    mCaptureTime = captureTime;
    mReadoutAvailable.signal();
    capturedBuffers = NULL;
  }

  /**
   * Stage 2: Capture new image
   */
  mNextCaptureTime = simulatedTime;
  mNextCapturedBuffers = nextBuffers;

  if (mNextCapturedBuffers != NULL) {
    if (listener != NULL) {
      listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
                              mNextCaptureTime);
    }
    ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
           (float)exposureDuration / 1e6, gain);
    mScene.setExposureDuration((float)exposureDuration / 1e9);
    mScene.calculateScene(mNextCaptureTime);

    // Might be adding more buffers, so size isn't constant
    for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
      const StreamBuffer &b = (*mNextCapturedBuffers)[i];
      ALOGVV(
          "Sensor capturing buffer %zu: stream %d,"
          " %d x %d, format %x, stride %d, buf %p, img %p",
          i, b.streamId, b.width, b.height, b.format, b.stride, b.buffer,
          b.img);
      switch (b.format) {
        case HAL_PIXEL_FORMAT_RAW16:
          captureRaw(b.img, gain, b.stride);
          break;
        case HAL_PIXEL_FORMAT_RGB_888:
          captureRGB(b.img, gain, b.stride);
          break;
        case HAL_PIXEL_FORMAT_RGBA_8888:
          captureRGBA(b.img, gain, b.stride);
          break;
        case HAL_PIXEL_FORMAT_BLOB:
#if defined HAL_DATASPACE_DEPTH
          if (b.dataSpace != HAL_DATASPACE_DEPTH) {
#endif
            // Add auxiliary buffer of the right size
            // Assumes only one BLOB (JPEG) buffer in
            // mNextCapturedBuffers
            StreamBuffer bAux;
            bAux.streamId = 0;
            bAux.width = b.width;
            bAux.height = b.height;
            bAux.format = HAL_PIXEL_FORMAT_RGB_888;
            bAux.stride = b.width;
            bAux.buffer = NULL;
            // TODO: Reuse these
            bAux.img = new uint8_t[b.width * b.height * 3];
            mNextCapturedBuffers->push_back(bAux);
#if defined HAL_DATASPACE_DEPTH
          } else {
            captureDepthCloud(b.img);
          }
#endif
          break;
        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
        case HAL_PIXEL_FORMAT_YCbCr_420_888:
          captureNV21(b.img, gain, b.stride);
          break;
        case HAL_PIXEL_FORMAT_YV12:
          // TODO:
          ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
          break;
        case HAL_PIXEL_FORMAT_Y16:
          captureDepth(b.img, gain, b.stride);
          break;
        default:
          ALOGE("%s: Unknown format %x, no output", __FUNCTION__, b.format);
          break;
      }
    }
  }

  ALOGVV("Sensor vertical blanking interval");
  nsecs_t workDoneRealTime = systemTime();
  const nsecs_t timeAccuracy = 2e6;  // 2 ms of imprecision is ok
  if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
    timespec t;
    t.tv_sec = (frameEndRealTime - workDoneRealTime) / 1000000000L;
    t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;

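    // nanosleep() writes the unslept remainder back into t when interrupted
    // by a signal, so retrying in a loop completes the full blanking wait.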
    int ret;
    do {
      ret = nanosleep(&t, &t);
    } while (ret != 0);
  }
  nsecs_t endRealTime __unused = systemTime();
  ALOGVV("Frame cycle took %d ms, target %d ms",
         (int)((endRealTime - startRealTime) / 1000000),
         (int)(frameDuration / 1000000));
  return true;
}

void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  float noiseVarGain = totalGain * totalGain;
  float readNoiseVar =
      kReadNoiseVarBeforeGain * noiseVarGain + kReadNoiseVarAfterGain;

  int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B};  // RGGB
  mScene.setReadoutPixel(0, 0);
  for (unsigned int y = 0; y < mResolution[1]; y++) {
    int *bayerRow = bayerSelect + (y & 0x1) * 2;
    uint16_t *px = (uint16_t *)img + y * stride;
    for (unsigned int x = 0; x < mResolution[0]; x++) {
      uint32_t electronCount;
      electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];

      // TODO: Better pixel saturation curve?
      electronCount = (electronCount < kSaturationElectrons)
                          ? electronCount
                          : kSaturationElectrons;

      // TODO: Better A/D saturation curve?
      uint16_t rawCount = electronCount * totalGain;
      rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;

      // Calculate noise value
      // TODO: Use more-correct Gaussian instead of uniform noise
      float photonNoiseVar = electronCount * noiseVarGain;
      float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
      // Scaled to roughly match gaussian/uniform noise stddev
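      // (std::rand() / (1.0 + RAND_MAX) is uniform on [0, 1); the affine map
      // below yields a sample on [-1.25, 1.25), whose standard deviation is
      // 1.25 / sqrt(3) ~= 0.72.)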
      float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;

      rawCount += kBlackLevel;
      rawCount += noiseStddev * noiseSample;

      *px++ = rawCount;
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("Raw sensor image captured");
}

void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // In fixed-point math, calculate total scaling from electrons to 8bpp
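  // (Q6 fixed point, 64 = 2^6; e.g. at ISO 100, totalGain = kBaseGainFactor
  // = 2, so scale64x = 64 * 2 * 255 / 4000 = 8, i.e. 0.125 counts/electron.)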
  int scale64x = 64 * totalGain * 255 / kMaxRawValue;
  uint32_t inc = std::ceil((float)mResolution[0] / stride);

  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    uint8_t *px = img + outY * stride * 4;
    mScene.setReadoutPixel(0, y);
    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
      uint32_t rCount, gCount, bCount;
      // TODO: Perfect demosaicing is a cheat
      const uint32_t *pixel = mScene.getPixelElectrons();
      rCount = pixel[Scene::R] * scale64x;
      gCount = pixel[Scene::Gr] * scale64x;
      bCount = pixel[Scene::B] * scale64x;

      *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
      *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
      *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
      *px++ = 255;
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("RGBA sensor image captured");
}

void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // In fixed-point math, calculate total scaling from electrons to 8bpp
  int scale64x = 64 * totalGain * 255 / kMaxRawValue;
  uint32_t inc = std::ceil((float)mResolution[0] / stride);

  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    mScene.setReadoutPixel(0, y);
    uint8_t *px = img + outY * stride * 3;
    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
      uint32_t rCount, gCount, bCount;
      // TODO: Perfect demosaicing is a cheat
      const uint32_t *pixel = mScene.getPixelElectrons();
      rCount = pixel[Scene::R] * scale64x;
      gCount = pixel[Scene::Gr] * scale64x;
      bCount = pixel[Scene::B] * scale64x;

      *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
      *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
      *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("RGB sensor image captured");
}

void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // Using fixed-point math with 6 bits of fractional precision.
  // In fixed-point math, calculate total scaling from electrons to 8bpp
  const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
  // In fixed-point math, saturation point of sensor after gain
  const int saturationPoint = 64 * 255;
  // Fixed-point coefficients for RGB-YUV transform
  // Based on JFIF RGB->YUV transform.
  // Cb/Cr offset scaled by 64x twice since they're applied post-multiply
  const int rgbToY[] = {19, 37, 7};
  const int rgbToCb[] = {-10, -21, 32, 524288};
  const int rgbToCr[] = {32, -26, -5, 524288};
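  // (These are the JFIF coefficients scaled by 64: e.g. Y = 0.299 R +
  // 0.587 G + 0.114 B rounds to {19, 37, 7}/64, and the usual +128 chroma
  // offset becomes 128 * 64 * 64 = 524288 since the RGB inputs already
  // carry a 64x factor.)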
  // Scale back to 8bpp non-fixed-point
  const int scaleOut = 64;
  const int scaleOutSq = scaleOut * scaleOut;  // after multiplies

  // inc = how many pixels to skip while reading every next pixel
  // horizontally.
  uint32_t inc = std::ceil((float)mResolution[0] / stride);
  // outH = projected vertical resolution based on stride.
  uint32_t outH = mResolution[1] / inc;
  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    uint8_t *pxY = img + outY * stride;
    uint8_t *pxVU = img + (outH + outY / 2) * stride;
    mScene.setReadoutPixel(0, y);
    for (unsigned int outX = 0; outX < stride; outX++) {
      int32_t rCount, gCount, bCount;
      // TODO: Perfect demosaicing is a cheat
      const uint32_t *pixel = mScene.getPixelElectrons();
      rCount = pixel[Scene::R] * scale64x;
      rCount = rCount < saturationPoint ? rCount : saturationPoint;
      gCount = pixel[Scene::Gr] * scale64x;
      gCount = gCount < saturationPoint ? gCount : saturationPoint;
      bCount = pixel[Scene::B] * scale64x;
      bCount = bCount < saturationPoint ? bCount : saturationPoint;

      *pxY++ = (rgbToY[0] * rCount + rgbToY[1] * gCount + rgbToY[2] * bCount) /
               scaleOutSq;
      if (outY % 2 == 0 && outX % 2 == 0) {
        *pxVU++ = (rgbToCb[0] * rCount + rgbToCb[1] * gCount +
                   rgbToCb[2] * bCount + rgbToCb[3]) /
                  scaleOutSq;
        *pxVU++ = (rgbToCr[0] * rCount + rgbToCr[1] * gCount +
                   rgbToCr[2] * bCount + rgbToCr[3]) /
                  scaleOutSq;
      }

      // Skip unprocessed pixels from sensor.
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
  }
  ALOGVV("NV21 sensor image captured");
}

void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t stride) {
  float totalGain = gain / 100.0 * kBaseGainFactor;
  // In fixed-point math, calculate scaling factor to 13bpp millimeters
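  // (13bpp because 8191 = 2^13 - 1 is the largest representable millimeter
  // value for a 13-bit depth sample.)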
  int scale64x = 64 * totalGain * 8191 / kMaxRawValue;
  uint32_t inc = std::ceil((float)mResolution[0] / stride);

  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
    mScene.setReadoutPixel(0, y);
    uint16_t *px = ((uint16_t *)img) + outY * stride;
    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
      uint32_t depthCount;
      // TODO: Make up real depth scene instead of using green channel
      // as depth
      const uint32_t *pixel = mScene.getPixelElectrons();
      depthCount = pixel[Scene::Gr] * scale64x;

      *px++ = depthCount < 8191 * 64 ? depthCount / 64 : 0;
      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
    }
    // TODO: Handle this better
    // simulatedTime += mRowReadoutTime;
  }
  ALOGVV("Depth sensor image captured");
}

void Sensor::captureDepthCloud(uint8_t *img __unused) {
#if defined HAL_DATASPACE_DEPTH
  android_depth_points *cloud = reinterpret_cast<android_depth_points *>(img);

  cloud->num_points = 16;

  // TODO: Create point cloud values that match RGB scene
  const int FLOATS_PER_POINT = 4;
  const float JITTER_STDDEV = 0.1f;
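  // Lay out a jittered 4x4 grid of points roughly centered on the optical
  // axis about 3 m out; the fourth float of each xyzc entry is a fixed 0.8
  // confidence value.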
  for (size_t y = 0, i = 0; y < 4; y++) {
    for (size_t x = 0; x < 4; x++, i++) {
      float randSampleX = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
      randSampleX *= JITTER_STDDEV;

      float randSampleY = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
      randSampleY *= JITTER_STDDEV;

      float randSampleZ = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
      randSampleZ *= JITTER_STDDEV;

      cloud->xyzc_points[i * FLOATS_PER_POINT + 0] = x - 1.5f + randSampleX;
      cloud->xyzc_points[i * FLOATS_PER_POINT + 1] = y - 1.5f + randSampleY;
      cloud->xyzc_points[i * FLOATS_PER_POINT + 2] = 3.f + randSampleZ;
      cloud->xyzc_points[i * FLOATS_PER_POINT + 3] = 0.8f;
    }
  }

  ALOGVV("Depth point cloud captured");
#endif
}

}  // namespace android