1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package android.graphics;
18 
19 import android.annotation.IntDef;
20 
21 import java.lang.annotation.Retention;
22 import java.lang.annotation.RetentionPolicy;
23 
24 public class ImageFormat {
     /**
      * The set of image format constants defined by this class, for use as a
      * typed annotation on APIs that accept or return an image format.
      *
      * @hide
      */
     @Retention(RetentionPolicy.SOURCE)
     @IntDef(value = {
             UNKNOWN,
             RGB_565,
             YV12,
             Y8,
             Y16,
             NV16,
             NV21,
             YUY2,
             JPEG,
             DEPTH_JPEG,
             YUV_420_888,
             YUV_422_888,
             YUV_444_888,
             FLEX_RGB_888,
             FLEX_RGBA_8888,
             RAW_SENSOR,
             RAW_PRIVATE,
             RAW10,
             RAW12,
             DEPTH16,
             DEPTH_POINT_CLOUD,
             RAW_DEPTH,
             PRIVATE,
             HEIC
     })
     public @interface Format {
     }
55 
    /*
     * these constants are chosen to be binary compatible with their previous
     * location in PixelFormat.java
     */

    public static final int UNKNOWN = 0;

    /**
     * RGB format used for pictures encoded as RGB_565. See
     * {@link android.hardware.Camera.Parameters#setPictureFormat(int)}.
     */
    public static final int RGB_565 = 4;

    /**
     * <p>Android YUV format.</p>
     *
     * <p>This format is exposed to software decoders and applications.</p>
     *
     * <p>YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
     * by (W/2) x (H/2) Cr and Cb planes.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * <li>a vertical stride equal to the height</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height
     * c_stride = ALIGN(stride/2, 16)
     * c_size = c_stride * height/2
     * size = y_size + c_size * 2
     * cr_offset = y_size
     * cb_offset = y_size + c_size</pre>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>For the older camera API, this format is guaranteed to be supported for
     * {@link android.hardware.Camera} preview images since API level 12; for earlier API versions,
     * check {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     *
     * <p>Note that for camera preview callback use (see
     * {@link android.hardware.Camera#setPreviewCallback}), the
     * <var>stride</var> value is the smallest possible; that is, it is equal
     * to:
     *
     * <pre>stride = ALIGN(width, 16)</pre>
     * </p>
     *
     * @see android.hardware.Camera#setPreviewCallback
     * @see android.hardware.Camera.Parameters#setPreviewFormat
     * @see android.hardware.Camera.Parameters#getSupportedPreviewFormats
     */
    public static final int YV12 = 0x32315659;

    /**
     * <p>Android Y8 format.</p>
     *
     * <p>Y8 is a YUV planar format comprised of a WxH Y plane only, with each pixel
     * being represented by 8 bits. It is equivalent to just the Y plane from {@link #YV12}
     * format.</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int Y8 = 0x20203859;

    /**
     * <p>Android Y16 format.</p>
     *
     * Y16 is a YUV planar format comprised of a WxH Y plane, with each pixel
     * being represented by 16 bits. It is just like {@link #Y8}, but has 16
     * bits per pixel (little endian).</p>
     *
     * <p>This format assumes
     * <ul>
     * <li>an even width</li>
     * <li>an even height</li>
     * <li>a horizontal stride multiple of 16 pixels</li>
     * </ul>
     * </p>
     *
     * <pre> y_size = stride * height </pre>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object if this format is
     * supported by {@link android.hardware.camera2.CameraDevice}.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     *
     * @hide
     */
    public static final int Y16 = 0x20363159;

    /**
     * YCbCr format, used for video.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>Whether this format is supported by the old camera API can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     *
     */
    public static final int NV16 = 0x10;

    /**
     * YCrCb format used for images, which uses the NV21 encoding format.
     *
     * <p>This is the default format
     * for {@link android.hardware.Camera} preview images, when not otherwise set with
     * {@link android.hardware.Camera.Parameters#setPreviewFormat(int)}.</p>
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     */
    public static final int NV21 = 0x11;

    /**
     * YCbCr format used for images, which uses YUYV (YUY2) encoding format.
     *
     * <p>For the {@link android.hardware.camera2} API, the {@link #YUV_420_888} format is
     * recommended for YUV output instead.</p>
     *
     * <p>This is an alternative format for {@link android.hardware.Camera} preview images. Whether
     * this format is supported by the camera hardware can be determined by
     * {@link android.hardware.Camera.Parameters#getSupportedPreviewFormats()}.</p>
     */
    public static final int YUY2 = 0x14;

    /**
     * Compressed JPEG format.
     *
     * <p>This format is always supported as an output format for the
     * {@link android.hardware.camera2} API, and as a picture format for the older
     * {@link android.hardware.Camera} API</p>
     */
    public static final int JPEG = 0x100;

    /**
     * Depth augmented compressed JPEG format.
     *
     * <p>JPEG compressed main image along with XMP embedded depth metadata
     * following ISO 16684-1:2011(E).</p>
     */
    public static final int DEPTH_JPEG = 0x69656963;
228 
    /**
     * <p>Multi-plane Android YUV 420 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:0
     * chroma-subsampled planar or semiplanar buffer (but not fully interleaved),
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>The Y-plane is guaranteed not to be interleaved with the U/V planes
     * (in particular, pixel stride is always 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}).</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.hardware.camera2.CameraDevice}
     * through a {@link android.media.ImageReader} object.</p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int YUV_420_888 = 0x23;

    /**
     * <p>Multi-plane Android YUV 422 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:2:2
     * chroma-subsampled (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_422_888 = 0x27;

    /**
     * <p>Multi-plane Android YUV 444 format</p>
     *
     * <p>This format is a generic YCbCr format, capable of describing any 4:4:4
     * (planar, semiplanar or interleaved) format,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).</p>
     *
     * <p>In contrast to the {@link #YUV_420_888} format, the Y-plane may have a pixel
     * stride greater than 1 in
     * {@link android.media.Image.Plane#getPixelStride() yPlane.getPixelStride()}.</p>
     *
     * <p>The U/V planes are guaranteed to have the same row stride and pixel stride
     * (in particular,
     * {@link android.media.Image.Plane#getRowStride() uPlane.getRowStride()}
     * == {@link android.media.Image.Plane#getRowStride() vPlane.getRowStride()} and
     * {@link android.media.Image.Plane#getPixelStride() uPlane.getPixelStride()}
     * == {@link android.media.Image.Plane#getPixelStride() vPlane.getPixelStride()}
     * ).</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int YUV_444_888 = 0x28;

    /**
     * <p>Multi-plane Android RGB format</p>
     *
     * <p>This format is a generic RGB format, capable of describing most RGB formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by three separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), and plane #2 is always B
     * (blue).</p>
     *
     * <p>All three planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int FLEX_RGB_888 = 0x29;

    /**
     * <p>Multi-plane Android RGBA format</p>
     *
     * <p>This format is a generic RGBA format, capable of describing most RGBA formats,
     * with 8 bits per color sample.</p>
     *
     * <p>Images in this format are always represented by four separate buffers
     * of data, one for each color plane. Additional information always
     * accompanies the buffers, describing the row stride and the pixel stride
     * for each plane.</p>
     *
     * <p>The order of planes in the array returned by
     * {@link android.media.Image#getPlanes() Image#getPlanes()} is guaranteed such that
     * plane #0 is always R (red), plane #1 is always G (green), plane #2 is always B (blue),
     * and plane #3 is always A (alpha). This format may represent pre-multiplied or
     * non-premultiplied alpha.</p>
     *
     * <p>All four planes are guaranteed to have the same row strides and pixel strides.</p>
     *
     * <p>For example, the {@link android.media.Image} object can provide data
     * in this format from a {@link android.media.MediaCodec}
     * through {@link android.media.MediaCodec#getOutputImage} object.</p>
     *
     * @see android.media.Image
     * @see android.media.MediaCodec
     */
    public static final int FLEX_RGBA_8888 = 0x2A;

    /**
     * <p>General raw camera sensor image format, usually representing a
     * single-channel Bayer-mosaic image. Each pixel color sample is stored with
     * 16 bits of precision.</p>
     *
     * <p>The layout of the color mosaic, the maximum and minimum encoding
     * values of the raw pixel data, the color space of the image, and all other
     * needed information to interpret a raw sensor image must be queried from
     * the {@link android.hardware.camera2.CameraDevice} which produced the
     * image.</p>
     */
    public static final int RAW_SENSOR = 0x20;
408 
    /**
     * <p>Private raw camera sensor image format, a single channel image with
     * implementation dependent pixel layout.</p>
     *
     * <p>RAW_PRIVATE is a format for unprocessed raw image buffers coming from an
     * image sensor. The actual structure of buffers of this format is
     * implementation-dependent.</p>
     *
     */
    public static final int RAW_PRIVATE = 0x24;
419 
    /**
     * <p>
     * Android 10-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 10-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of
     * each row, each 4 consecutive pixels are packed into 5 bytes (40 bits).
     * Each one of the first 4 bytes contains the top 8 bits of each pixel. The
     * fifth byte contains the 2 least significant bits of the 4 pixels, the
     * exact layout data for each 4 consecutive pixels is illustrated below
     * ({@code Pi[j]} stands for the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[9]</td>
     * <td align="center">P0[8]</td>
     * <td align="center">P0[7]</td>
     * <td align="center">P0[6]</td>
     * <td align="center">P0[5]</td>
     * <td align="center">P0[4]</td>
     * <td align="center">P0[3]</td>
     * <td align="center">P0[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[9]</td>
     * <td align="center">P1[8]</td>
     * <td align="center">P1[7]</td>
     * <td align="center">P1[6]</td>
     * <td align="center">P1[5]</td>
     * <td align="center">P1[4]</td>
     * <td align="center">P1[3]</td>
     * <td align="center">P1[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P2[9]</td>
     * <td align="center">P2[8]</td>
     * <td align="center">P2[7]</td>
     * <td align="center">P2[6]</td>
     * <td align="center">P2[5]</td>
     * <td align="center">P2[4]</td>
     * <td align="center">P2[3]</td>
     * <td align="center">P2[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 3:</td>
     * <td align="center">P3[9]</td>
     * <td align="center">P3[8]</td>
     * <td align="center">P3[7]</td>
     * <td align="center">P3[6]</td>
     * <td align="center">P3[5]</td>
     * <td align="center">P3[4]</td>
     * <td align="center">P3[3]</td>
     * <td align="center">P3[2]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 4:</td>
     * <td align="center">P3[1]</td>
     * <td align="center">P3[0]</td>
     * <td align="center">P2[1]</td>
     * <td align="center">P2[0]</td>
     * <td align="center">P1[1]</td>
     * <td align="center">P1[0]</td>
     * <td align="center">P0[1]</td>
     * <td align="center">P0[0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (10 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (10 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW10 = 0x25;

    /**
     * <p>
     * Android 12-bit raw format
     * </p>
     * <p>
     * This is a single-plane, 12-bit per pixel, densely packed (in each row),
     * unprocessed format, usually representing raw Bayer-pattern images coming
     * from an image sensor.
     * </p>
     * <p>
     * In an image buffer with this format, starting from the first pixel of each
     * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
     * and second byte contains the top 8 bits of first and second pixel. The third
     * byte contains the 4 least significant bits of the two pixels, the exact layout
     * data for each two consecutive pixels is illustrated below ({@code Pi[j]} stands for
     * the jth bit of the ith pixel):
     * </p>
     * <table>
     * <thead>
     * <tr>
     * <th align="center"></th>
     * <th align="center">bit 7</th>
     * <th align="center">bit 6</th>
     * <th align="center">bit 5</th>
     * <th align="center">bit 4</th>
     * <th align="center">bit 3</th>
     * <th align="center">bit 2</th>
     * <th align="center">bit 1</th>
     * <th align="center">bit 0</th>
     * </tr>
     * </thead> <tbody>
     * <tr>
     * <td align="center">Byte 0:</td>
     * <td align="center">P0[11]</td>
     * <td align="center">P0[10]</td>
     * <td align="center">P0[ 9]</td>
     * <td align="center">P0[ 8]</td>
     * <td align="center">P0[ 7]</td>
     * <td align="center">P0[ 6]</td>
     * <td align="center">P0[ 5]</td>
     * <td align="center">P0[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 1:</td>
     * <td align="center">P1[11]</td>
     * <td align="center">P1[10]</td>
     * <td align="center">P1[ 9]</td>
     * <td align="center">P1[ 8]</td>
     * <td align="center">P1[ 7]</td>
     * <td align="center">P1[ 6]</td>
     * <td align="center">P1[ 5]</td>
     * <td align="center">P1[ 4]</td>
     * </tr>
     * <tr>
     * <td align="center">Byte 2:</td>
     * <td align="center">P1[ 3]</td>
     * <td align="center">P1[ 2]</td>
     * <td align="center">P1[ 1]</td>
     * <td align="center">P1[ 0]</td>
     * <td align="center">P0[ 3]</td>
     * <td align="center">P0[ 2]</td>
     * <td align="center">P0[ 1]</td>
     * <td align="center">P0[ 0]</td>
     * </tr>
     * </tbody>
     * </table>
     * <p>
     * This format assumes
     * <ul>
     * <li>a width multiple of 4 pixels</li>
     * <li>an even height</li>
     * </ul>
     * </p>
     *
     * <pre>size = row stride * height</pre> where the row stride is in <em>bytes</em>,
     * not pixels.
     *
     * <p>
     * Since this is a densely packed format, the pixel stride is always 0. The
     * application must use the pixel data layout defined in above table to
     * access each row data. When row stride is equal to {@code width * (12 / 8)}, there
     * will be no padding bytes at the end of each row, the entire image data is
     * densely packed. When stride is larger than {@code width * (12 / 8)}, padding
     * bytes will be present at the end of each row.
     * </p>
     * <p>
     * For example, the {@link android.media.Image} object can provide data in
     * this format from a {@link android.hardware.camera2.CameraDevice} (if
     * supported) through a {@link android.media.ImageReader} object. The
     * {@link android.media.Image#getPlanes() Image#getPlanes()} will return a
     * single plane containing the pixel data. The pixel stride is always 0 in
     * {@link android.media.Image.Plane#getPixelStride()}, and the
     * {@link android.media.Image.Plane#getRowStride()} describes the vertical
     * neighboring pixel distance (in bytes) between adjacent rows.
     * </p>
     *
     * @see android.media.Image
     * @see android.media.ImageReader
     * @see android.hardware.camera2.CameraDevice
     */
    public static final int RAW12 = 0x26;
645 
646     /**
647      * <p>Android dense depth image format.</p>
648      *
649      * <p>Each pixel is 16 bits, representing a depth ranging measurement from a depth camera or
650      * similar sensor. The 16-bit sample consists of a confidence value and the actual ranging
651      * measurement.</p>
652      *
653      * <p>The confidence value is an estimate of correctness for this sample.  It is encoded in the
654      * 3 most significant bits of the sample, with a value of 0 representing 100% confidence, a
655      * value of 1 representing 0% confidence, a value of 2 representing 1/7, a value of 3
656      * representing 2/7, and so on.</p>
657      *
658      * <p>As an example, the following sample extracts the range and confidence from the first pixel
659      * of a DEPTH16-format {@link android.media.Image}, and converts the confidence to a
660      * floating-point value between 0 and 1.f inclusive, with 1.f representing maximum confidence:
661      *
662      * <pre>
663      *    ShortBuffer shortDepthBuffer = img.getPlanes()[0].getBuffer().asShortBuffer();
664      *    short depthSample = shortDepthBuffer.get()
665      *    short depthRange = (short) (depthSample & 0x1FFF);
666      *    short depthConfidence = (short) ((depthSample >> 13) & 0x7);
667      *    float depthPercentage = depthConfidence == 0 ? 1.f : (depthConfidence - 1) / 7.f;
668      * </pre>
669      * </p>
670      *
671      * <p>This format assumes
672      * <ul>
673      * <li>an even width</li>
674      * <li>an even height</li>
675      * <li>a horizontal stride multiple of 16 pixels</li>
676      * </ul>
677      * </p>
678      *
679      * <pre> y_size = stride * height </pre>
680      *
681      * When produced by a camera, the units for the range are millimeters.
682      */
683     public static final int DEPTH16 = 0x44363159;
684 
685     /**
686      * Android sparse depth point cloud format.
687      *
688      * <p>A variable-length list of 3D points plus a confidence value, with each point represented
689      * by four floats; first the X, Y, Z position coordinates, and then the confidence value.</p>
690      *
691      * <p>The number of points is {@code (size of the buffer in bytes) / 16}.
692      *
693      * <p>The coordinate system and units of the position values depend on the source of the point
694      * cloud data. The confidence value is between 0.f and 1.f, inclusive, with 0 representing 0%
695      * confidence and 1.f representing 100% confidence in the measured position values.</p>
696      *
697      * <p>As an example, the following code extracts the first depth point in a DEPTH_POINT_CLOUD
698      * format {@link android.media.Image}:
699      * <pre>
700      *    FloatBuffer floatDepthBuffer = img.getPlanes()[0].getBuffer().asFloatBuffer();
701      *    float x = floatDepthBuffer.get();
702      *    float y = floatDepthBuffer.get();
703      *    float z = floatDepthBuffer.get();
704      *    float confidence = floatDepthBuffer.get();
705      * </pre>
706      *
707      * For camera devices that support the
708      * {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT DEPTH_OUTPUT}
709      * capability, DEPTH_POINT_CLOUD coordinates have units of meters, and the coordinate system is
710      * defined by the camera's pose transforms:
711      * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_TRANSLATION} and
712      * {@link android.hardware.camera2.CameraCharacteristics#LENS_POSE_ROTATION}. That means the origin is
713      * the optical center of the camera device, and the positive Z axis points along the camera's optical axis,
714      * toward the scene.
715      */
716     public static final int DEPTH_POINT_CLOUD = 0x101;
717 
718     /**
719      * Unprocessed implementation-dependent raw
720      * depth measurements, opaque with 16 bit
721      * samples.
722      *
723      * @hide
724      */
725     public static final int RAW_DEPTH = 0x1002;
726 
727     /**
728      * Android private opaque image format.
729      * <p>
730      * The choices of the actual format and pixel data layout are entirely up to
731      * the device-specific and framework internal implementations, and may vary
732      * depending on use cases even for the same device. The buffers of this
733      * format can be produced by components like
734      * {@link android.media.ImageWriter ImageWriter} , and interpreted correctly
735      * by consumers like {@link android.hardware.camera2.CameraDevice
736      * CameraDevice} based on the device/framework private information. However,
737      * these buffers are not directly accessible to the application.
738      * </p>
739      * <p>
740      * When an {@link android.media.Image Image} of this format is obtained from
741      * an {@link android.media.ImageReader ImageReader} or
742      * {@link android.media.ImageWriter ImageWriter}, the
743      * {@link android.media.Image#getPlanes() getPlanes()} method will return an
744      * empty {@link android.media.Image.Plane Plane} array.
745      * </p>
746      * <p>
747      * If a buffer of this format is to be used as an OpenGL ES texture, the
748      * framework will assume that sampling the texture will always return an
749      * alpha value of 1.0 (i.e. the buffer contains only opaque pixel values).
750      * </p>
751      */
752     public static final int PRIVATE = 0x22;
753 
754     /**
755      * Compressed HEIC format.
756      *
757      * <p>This format defines the HEIC brand of High Efficiency Image File
758      * Format as described in ISO/IEC 23008-12.</p>
759      */
760     public static final int HEIC = 0x48454946;
761 
762     /**
763      * Use this function to retrieve the number of bits per pixel of an
764      * ImageFormat.
765      *
766      * @param format
767      * @return the number of bits per pixel of the given format or -1 if the
768      *         format doesn't exist or is not supported.
769      */
getBitsPerPixel(@ormat int format)770     public static int getBitsPerPixel(@Format int format) {
771         switch (format) {
772             case RGB_565:
773                 return 16;
774             case NV16:
775                 return 16;
776             case YUY2:
777                 return 16;
778             case YV12:
779                 return 12;
780             case Y8:
781                 return 8;
782             case Y16:
783             case DEPTH16:
784                 return 16;
785             case NV21:
786                 return 12;
787             case YUV_420_888:
788                 return 12;
789             case YUV_422_888:
790                 return 16;
791             case YUV_444_888:
792                 return 24;
793             case FLEX_RGB_888:
794                 return 24;
795             case FLEX_RGBA_8888:
796                 return 32;
797             case RAW_DEPTH:
798             case RAW_SENSOR:
799                 return 16;
800             case RAW10:
801                 return 10;
802             case RAW12:
803                 return 12;
804         }
805         return -1;
806     }
807 
808     /**
809      * Determine whether or not this is a public-visible {@code format}.
810      *
811      * <p>In particular, {@code @hide} formats will return {@code false}.</p>
812      *
813      * <p>Any other formats (including UNKNOWN) will return {@code false}.</p>
814      *
815      * @param format an integer format
816      * @return a boolean
817      *
818      * @hide
819      */
isPublicFormat(@ormat int format)820     public static boolean isPublicFormat(@Format int format) {
821         switch (format) {
822             case RGB_565:
823             case NV16:
824             case YUY2:
825             case YV12:
826             case JPEG:
827             case NV21:
828             case YUV_420_888:
829             case YUV_422_888:
830             case YUV_444_888:
831             case FLEX_RGB_888:
832             case FLEX_RGBA_8888:
833             case RAW_SENSOR:
834             case RAW_PRIVATE:
835             case RAW10:
836             case RAW12:
837             case DEPTH16:
838             case DEPTH_POINT_CLOUD:
839             case PRIVATE:
840             case RAW_DEPTH:
841             case Y8:
842             case DEPTH_JPEG:
843             case HEIC:
844                 return true;
845         }
846 
847         return false;
848     }
849 }
850