2 * Copyright (C) 2016 The Android Open Source Project
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 package android.hardware.graphics.common@1.0;
20 * pixel format definitions
22 @export(name="android_pixel_format_t", value_prefix="HAL_PIXEL_FORMAT_")
23 enum PixelFormat : int32_t {
25 * "linear" color pixel formats:
27 * When used with ANativeWindow, the dataSpace field describes the color
28 * space of the buffer.
30 * The color space determines, for example, if the formats are linear or
31 * gamma-corrected; or whether any special operations are performed when
32 * reading or writing into a buffer in one of these formats.
41 * The following formats use 10bit integers for R, G, and B and
42 * 2 bits for alpha. This is used to improve color precision on
43 * wide-color devices, e.g. Display-P3 or scRGB.
45 * When used with ANativeWindow, the dataSpace field describes the color
46 * space of the buffer.
51 * The following formats use a 16bit float per color component.
53 * When used with ANativeWindow, the dataSpace field describes the color
54 * space of the buffer.
61 * This range is reserved for pixel formats that are specific to the HAL
62 * implementation. Implementations can use any value in this range to
63 * communicate video pixel formats between their HAL modules. These formats
64 * must not have an alpha channel. Additionally, an EGLImage created from a
65 * gralloc buffer of one of these formats must be supported for use with the
66 * GL_OES_EGL_image_external OpenGL ES extension.
72 * This format is exposed outside of the HAL to software decoders and
73 * applications. EGLImageKHR must support it in conjunction with the
74 * OES_EGL_image_external extension.
76 * YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
77 * by (W/2) x (H/2) Cr and Cb planes.
82 * - a horizontal stride multiple of 16 pixels
83 * - a vertical stride equal to the height
85 * y_size = stride * height
86 * c_stride = ALIGN(stride/2, 16)
87 * c_size = c_stride * height/2
88 * size = y_size + c_size * 2
90 * cb_offset = y_size + c_size
92 * When used with ANativeWindow, the dataSpace field describes the color
93 * space of the buffer.
95 YV12 = 0x32315659, // YCrCb 4:2:0 Planar
101 * This format is exposed outside of the HAL to the framework.
102 * The expected gralloc usage flags are SW_* and HW_CAMERA_*,
103 * and no other HW_ flags will be used.
105 * Y8 is a YUV planar format comprised of a WxH Y plane,
106 * with each pixel being represented by 8 bits.
108 * It is equivalent to just the Y plane from YV12.
110 * This format assumes
113 * - a horizontal stride multiple of 16 pixels
114 * - a vertical stride equal to the height
116 * size = stride * height
118 * When used with ANativeWindow, the dataSpace field describes the color
119 * space of the buffer.
124 * Android Y16 format:
126 * This format is exposed outside of the HAL to the framework.
127 * The expected gralloc usage flags are SW_* and HW_CAMERA_*,
128 * and no other HW_ flags will be used.
130 * Y16 is a YUV planar format comprised of a WxH Y plane,
131 * with each pixel being represented by 16 bits.
133 * It is just like Y8, but has double the bits per pixel (little endian).
135 * This format assumes
138 * - a horizontal stride multiple of 16 pixels
139 * - a vertical stride equal to the height
140 * - strides are specified in pixels, not in bytes
142 * size = stride * height * 2
144 * When used with ANativeWindow, the dataSpace field describes the color
145 * space of the buffer, except that dataSpace field
146 * HAL_DATASPACE_DEPTH indicates that this buffer contains a depth
147 * image where each sample is a distance value measured by a depth camera,
148 * plus an associated confidence value.
153 * Android RAW sensor format:
155 * This format is exposed outside of the camera HAL to applications.
157 * RAW16 is a single-channel, 16-bit, little endian format, typically
158 * representing raw Bayer-pattern images from an image sensor, with minimal
161 * The exact pixel layout of the data in the buffer is sensor-dependent, and
162 * needs to be queried from the camera device.
164 * Generally, not all 16 bits are used; more common values are 10 or 12
165 * bits. If not all bits are used, the lower-order bits are filled first.
166 * All parameters to interpret the raw data (black and white points,
167 * color space, etc) must be queried from the camera device.
169 * This format assumes
172 * - a horizontal stride multiple of 16 pixels
173 * - a vertical stride equal to the height
174 * - strides are specified in pixels, not in bytes
176 * size = stride * height * 2
178 * This format must be accepted by the gralloc module when used with the
179 * following usage flags:
180 * - GRALLOC_USAGE_HW_CAMERA_*
181 * - GRALLOC_USAGE_SW_*
182 * - GRALLOC_USAGE_RENDERSCRIPT
184 * When used with ANativeWindow, the mapping of the dataSpace field to
185 * buffer contents for RAW16 is as follows:
187 * dataSpace value | Buffer contents
188 * -------------------------------+-----------------------------------------
189 * HAL_DATASPACE_ARBITRARY | Raw image sensor data, layout is as
191 * HAL_DATASPACE_DEPTH | Unprocessed implementation-dependent raw
192 * | depth measurements, opaque with 16 bit
194 * Other | Unsupported
199 * Android RAW10 format:
201 * This format is exposed outside of the camera HAL to applications.
203 * RAW10 is a single-channel, 10-bit per pixel, densely packed in each row,
204 * unprocessed format, usually representing raw Bayer-pattern images coming from
207 * In an image buffer with this format, starting from the first pixel of each
208 * row, each 4 consecutive pixels are packed into 5 bytes (40 bits). Each one
209 * of the first 4 bytes contains the top 8 bits of each pixel. The fifth byte
210 * contains the 2 least significant bits of the 4 pixels, the exact layout data
211 * for each 4 consecutive pixels is illustrated below (Pi[j] stands for the jth
212 * bit of the ith pixel):
215 * =====|=====|=====|=====|=====|=====|=====|=====|
216 * Byte 0: |P0[9]|P0[8]|P0[7]|P0[6]|P0[5]|P0[4]|P0[3]|P0[2]|
217 * |-----|-----|-----|-----|-----|-----|-----|-----|
218 * Byte 1: |P1[9]|P1[8]|P1[7]|P1[6]|P1[5]|P1[4]|P1[3]|P1[2]|
219 * |-----|-----|-----|-----|-----|-----|-----|-----|
220 * Byte 2: |P2[9]|P2[8]|P2[7]|P2[6]|P2[5]|P2[4]|P2[3]|P2[2]|
221 * |-----|-----|-----|-----|-----|-----|-----|-----|
222 * Byte 3: |P3[9]|P3[8]|P3[7]|P3[6]|P3[5]|P3[4]|P3[3]|P3[2]|
223 * |-----|-----|-----|-----|-----|-----|-----|-----|
224 * Byte 4: |P3[1]|P3[0]|P2[1]|P2[0]|P1[1]|P1[0]|P0[1]|P0[0]|
225 * ===============================================
227 * This format assumes
228 * - a width multiple of 4 pixels
230 * - a vertical stride equal to the height
231 * - strides are specified in bytes, not in pixels
233 * size = stride * height
235 * When stride is equal to width * (10 / 8), there will be no padding bytes at
236 * the end of each row, the entire image data is densely packed. When stride is
237 * larger than width * (10 / 8), padding bytes will be present at the end of each
238 * row (including the last row).
240 * This format must be accepted by the gralloc module when used with the
241 * following usage flags:
242 * - GRALLOC_USAGE_HW_CAMERA_*
243 * - GRALLOC_USAGE_SW_*
244 * - GRALLOC_USAGE_RENDERSCRIPT
246 * When used with ANativeWindow, the dataSpace field should be
247 * HAL_DATASPACE_ARBITRARY, as raw image sensor buffers require substantial
248 * extra metadata to define.
253 * Android RAW12 format:
255 * This format is exposed outside of the camera HAL to applications.
257 * RAW12 is a single-channel, 12-bit per pixel, densely packed in each row,
258 * unprocessed format, usually representing raw Bayer-pattern images coming from
261 * In an image buffer with this format, starting from the first pixel of each
262 * row, each two consecutive pixels are packed into 3 bytes (24 bits). The first
263 * and second byte contains the top 8 bits of first and second pixel. The third
264 * byte contains the 4 least significant bits of the two pixels, the exact layout
265 * data for each two consecutive pixels is illustrated below (Pi[j] stands for
266 * the jth bit of the ith pixel):
269 * ======|======|======|======|======|======|======|======|
270 * Byte 0: |P0[11]|P0[10]|P0[ 9]|P0[ 8]|P0[ 7]|P0[ 6]|P0[ 5]|P0[ 4]|
271 * |------|------|------|------|------|------|------|------|
272 * Byte 1: |P1[11]|P1[10]|P1[ 9]|P1[ 8]|P1[ 7]|P1[ 6]|P1[ 5]|P1[ 4]|
273 * |------|------|------|------|------|------|------|------|
274 * Byte 2: |P1[ 3]|P1[ 2]|P1[ 1]|P1[ 0]|P0[ 3]|P0[ 2]|P0[ 1]|P0[ 0]|
275 * =======================================================
277 * This format assumes:
278 * - a width multiple of 4 pixels
280 * - a vertical stride equal to the height
281 * - strides are specified in bytes, not in pixels
283 * size = stride * height
285 * When stride is equal to width * (12 / 8), there will be no padding bytes at
286 * the end of each row, the entire image data is densely packed. When stride is
287 * larger than width * (12 / 8), padding bytes will be present at the end of
288 * each row (including the last row).
290 * This format must be accepted by the gralloc module when used with the
291 * following usage flags:
292 * - GRALLOC_USAGE_HW_CAMERA_*
293 * - GRALLOC_USAGE_SW_*
294 * - GRALLOC_USAGE_RENDERSCRIPT
296 * When used with ANativeWindow, the dataSpace field should be
297 * HAL_DATASPACE_ARBITRARY, as raw image sensor buffers require substantial
298 * extra metadata to define.
303 * Android opaque RAW format:
305 * This format is exposed outside of the camera HAL to applications.
307 * RAW_OPAQUE is a format for unprocessed raw image buffers coming from an
308 * image sensor. The actual structure of buffers of this format is
309 * implementation-dependent.
311 * This format must be accepted by the gralloc module when used with the
312 * following usage flags:
313 * - GRALLOC_USAGE_HW_CAMERA_*
314 * - GRALLOC_USAGE_SW_*
315 * - GRALLOC_USAGE_RENDERSCRIPT
317 * When used with ANativeWindow, the dataSpace field should be
318 * HAL_DATASPACE_ARBITRARY, as raw image sensor buffers require substantial
319 * extra metadata to define.
324 * Android binary blob graphics buffer format:
326 * This format is used to carry task-specific data which does not have a
327 * standard image structure. The details of the format are left to the two
330 * A typical use case is for transporting JPEG-compressed images from the
331 * Camera HAL to the framework or to applications.
333 * Buffers of this format must have a height of 1, and width equal to their
336 * When used with ANativeWindow, the mapping of the dataSpace field to
337 * buffer contents for BLOB is as follows:
339 * dataSpace value | Buffer contents
340 * -------------------------------+-----------------------------------------
341 * HAL_DATASPACE_JFIF | An encoded JPEG image
342 * HAL_DATASPACE_DEPTH | An android_depth_points buffer
343 * HAL_DATASPACE_SENSOR | Sensor event data.
344 * Other | Unsupported
350 * Android format indicating that the choice of format is entirely up to the
351 * device-specific Gralloc implementation.
353 * The Gralloc implementation should examine the usage bits passed in when
354 * allocating a buffer with this format, and it should derive the pixel
355 * format from those usage flags. This format will never be used with any
356 * of the GRALLOC_USAGE_SW_* usage flags.
358 * If a buffer of this format is to be used as an OpenGL ES texture, the
359 * framework will assume that sampling the texture will always return an
360 * alpha value of 1.0 (i.e. the buffer contains only opaque pixel values).
362 * When used with ANativeWindow, the dataSpace field describes the color
363 * space of the buffer.
365 IMPLEMENTATION_DEFINED = 0x22,
368 * Android flexible YCbCr 4:2:0 formats
370 * This format allows platforms to use an efficient YCbCr/YCrCb 4:2:0
371 * buffer layout, while still describing the general format in a
372 * layout-independent manner. While called YCbCr, it can be
373 * used to describe formats with either chromatic ordering, as well as
374 * whole planar or semiplanar layouts.
376 * struct android_ycbcr (below) is the struct used to describe it.
378 * This format must be accepted by the gralloc module when
379 * USAGE_SW_WRITE_* or USAGE_SW_READ_* are set.
381 * This format is locked for use by gralloc's (*lock_ycbcr) method, and
382 * locking with the (*lock) method will return an error.
384 * When used with ANativeWindow, the dataSpace field describes the color
385 * space of the buffer.
387 YCBCR_420_888 = 0x23,
390 * Android flexible YCbCr 4:2:2 formats
392 * This format allows platforms to use an efficient YCbCr/YCrCb 4:2:2
393 * buffer layout, while still describing the general format in a
394 * layout-independent manner. While called YCbCr, it can be
395 * used to describe formats with either chromatic ordering, as well as
396 * whole planar or semiplanar layouts.
398 * This format is currently only used by SW readable buffers
399 * produced by MediaCodecs, so the gralloc module can ignore this format.
401 YCBCR_422_888 = 0x27,
404 * Android flexible YCbCr 4:4:4 formats
406 * This format allows platforms to use an efficient YCbCr/YCrCb 4:4:4
407 * buffer layout, while still describing the general format in a
408 * layout-independent manner. While called YCbCr, it can be
409 * used to describe formats with either chromatic ordering, as well as
410 * whole planar or semiplanar layouts.
412 * This format is currently only used by SW readable buffers
413 * produced by MediaCodecs, so the gralloc module can ignore this format.
415 YCBCR_444_888 = 0x28,
418 * Android flexible RGB 888 formats
420 * This format allows platforms to use an efficient RGB/BGR/RGBX/BGRX
421 * buffer layout, while still describing the general format in a
422 * layout-independent manner. While called RGB, it can be
423 * used to describe formats with either color ordering and optional
424 * padding, as well as whole planar layout.
426 * This format is currently only used by SW readable buffers
427 * produced by MediaCodecs, so the gralloc module can ignore this format.
432 * Android flexible RGBA 8888 formats
434 * This format allows platforms to use an efficient RGBA/BGRA/ARGB/ABGR
435 * buffer layout, while still describing the general format in a
436 * layout-independent manner. While called RGBA, it can be
437 * used to describe formats with any of the component orderings, as
438 * well as whole planar layout.
440 * This format is currently only used by SW readable buffers
441 * produced by MediaCodecs, so the gralloc module can ignore this format.
443 FLEX_RGBA_8888 = 0x2A,
445 /** Legacy formats (deprecated), used by ImageFormat.java */
446 YCBCR_422_SP = 0x10, // NV16
447 YCRCB_420_SP = 0x11, // NV21
448 YCBCR_422_I = 0x14, // YUY2
453 * Buffer usage definitions.
455 enum BufferUsage : uint64_t {
456 /** bit 0-3 is an enum */
457 CPU_READ_MASK = 0xfULL,
458 /** buffer is never read by CPU */
460 /** buffer is rarely read by CPU */
462 /** buffer is often read by CPU */
465 /** bit 4-7 is an enum */
466 CPU_WRITE_MASK = 0xfULL << 4,
467 /** buffer is never written by CPU */
468 CPU_WRITE_NEVER = 0 << 4,
469 /** buffer is rarely written by CPU */
470 CPU_WRITE_RARELY = 2 << 4,
471 /** buffer is often written by CPU */
472 CPU_WRITE_OFTEN = 3 << 4,
474 /** buffer is used as a GPU texture */
475 GPU_TEXTURE = 1ULL << 8,
477 /** buffer is used as a GPU render target */
478 GPU_RENDER_TARGET = 1ULL << 9,
480 /** bit 10 must be zero */
482 /** buffer is used as a composer HAL overlay layer */
483 COMPOSER_OVERLAY = 1ULL << 11,
484 /** buffer is used as a composer HAL client target */
485 COMPOSER_CLIENT_TARGET = 1ULL << 12,
487 /** bit 13 must be zero */
490 * Buffer is allocated with hardware-level protection against copying the
491 * contents (or information derived from the contents) into unprotected
494 PROTECTED = 1ULL << 14,
496 /** buffer is used as a hwcomposer HAL cursor layer */
497 COMPOSER_CURSOR = 1ULL << 15,
499 /** buffer is used as a video encoder input */
500 VIDEO_ENCODER = 1ULL << 16,
502 /** buffer is used as a camera HAL output */
503 CAMERA_OUTPUT = 1ULL << 17,
505 /** buffer is used as a camera HAL input */
506 CAMERA_INPUT = 1ULL << 18,
508 /** bit 19 must be zero */
510 /** buffer is used as a renderscript allocation */
511 RENDERSCRIPT = 1ULL << 20,
513 /** bit 21 must be zero */
515 /** buffer is used as a video decoder output */
516 VIDEO_DECODER = 1ULL << 22,
518 /** buffer is used as a sensor direct report output */
519 SENSOR_DIRECT_DATA = 1ULL << 23,
522 * buffer is used as an OpenGL shader storage or uniform
525 GPU_DATA_BUFFER = 1ULL << 24,
527 /** bits 25-27 must be zero and are reserved for future versions */
528 /** bits 28-31 are reserved for vendor extensions */
529 VENDOR_MASK = 0xfULL << 28,
531 /** bits 32-47 must be zero and are reserved for future versions */
532 /** bits 48-63 are reserved for vendor extensions */
533 VENDOR_MASK_HI = 0xffffULL << 48,
537 * Transformation definitions
540 * ROT_90 is applied CLOCKWISE and AFTER FLIP_{H|V}.
543 @export(name="android_transform_t", value_prefix="HAL_TRANSFORM_")
544 enum Transform : int32_t {
545 /** flip source image horizontally (around the vertical axis) */
548 * flip source image vertically (around the horizontal axis) */
550 /** rotate source image 90 degrees clockwise */
552 /** rotate source image 180 degrees */
554 /** rotate source image 270 degrees clockwise */
557 /** 0x08 is reserved */
561 * Dataspace Definitions
562 * ======================
564 * Dataspace is the definition of how pixel values should be interpreted.
566 * For many formats, this is the colorspace of the image data, which includes
567 * primaries (including white point) and the transfer characteristic function,
568 * which describes both gamma curve and numeric range (within the bit depth).
570 * Other dataspaces include depth measurement data from a depth camera.
572 * A dataspace is comprised of a number of fields.
576 * The top 2 bits represent the revision of the field specification. This is
577 * currently always 0.
581 * +-----+----------------------------------------------------+
582 * fields | Rev | Revision specific fields |
583 * +-----+----------------------------------------------------+
585 * Field layout for version = 0:
586 * ----------------------------
588 * A dataspace is comprised of the following fields:
593 * bits 31-30 29-27 26 - 22 21 - 16 15 - 0
594 * +-----+-----+--------+--------+----------------------------+
595 * fields | 0 |Range|Transfer|Standard| Legacy and custom |
596 * +-----+-----+--------+--------+----------------------------+
597 * VV RRR TTTTT SSSSSS LLLLLLLL LLLLLLLL
599 * If range, transfer and standard fields are all 0 (e.g. top 16 bits are
600 * all zeroes), the bottom 16 bits contain either a legacy dataspace value,
603 @export(name="android_dataspace_t", value_prefix="HAL_DATASPACE_")
604 enum Dataspace : int32_t {
606 * Default-assumption data space, when not explicitly specified.
608 * It is safest to assume the buffer is an image with sRGB primaries and
609 * encoding ranges, but the consumer and/or the producer of the data may
610 * simply be using defaults. No automatic gamma transform should be
611 * expected, except for a possible display gamma transform when drawn to a
617 * Arbitrary dataspace with manually defined characteristics. Definition
618 * for colorspaces or other meaning must be communicated separately.
620 * This is used when specifying primaries, transfer characteristics,
623 * A typical use case is in video encoding parameters (e.g. for H.264),
624 * where a colorspace can have separately defined primaries, transfer
625 * characteristics, etc.
630 * Color-description aspects
632 * The following aspects define various characteristics of the color
633 * specification. These represent bitfields, so that a data space value
634 * can specify each of them independently.
642 * Defines the chromaticity coordinates of the source primaries in terms of
643 * the CIE 1931 definition of x and y specified in ISO 11664-1.
645 STANDARD_MASK = 63 << STANDARD_SHIFT, // 0x3F
648 * Chromaticity coordinates are unknown or are determined by the application.
649 * Implementations shall use the following suggested standards:
651 * All YCbCr formats: BT709 if size is 720p or larger (since most video
652 * content is letterboxed this corresponds to width is
653 * 1280 or greater, or height is 720 or greater).
654 * BT601_625 if size is smaller than 720p or is JPEG.
655 * All RGB formats: BT709.
657 * For all other formats standard is undefined, and implementations should use
658 * an appropriate standard for the data represented.
660 STANDARD_UNSPECIFIED = 0 << STANDARD_SHIFT,
667 * white (D65) 0.3127 0.3290
669 * Use the unadjusted KR = 0.2126, KB = 0.0722 luminance interpretation
670 * for RGB conversion.
672 STANDARD_BT709 = 1 << STANDARD_SHIFT,
679 * white (D65) 0.3127 0.3290
681 * KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
682 * for RGB conversion from the one purely determined by the primaries
683 * to minimize the color shift into RGB space that uses BT.709
686 STANDARD_BT601_625 = 2 << STANDARD_SHIFT,
693 * white (D65) 0.3127 0.3290
695 * Use the unadjusted KR = 0.222, KB = 0.071 luminance interpretation
696 * for RGB conversion.
698 STANDARD_BT601_625_UNADJUSTED = 3 << STANDARD_SHIFT,
705 * white (D65) 0.3127 0.3290
707 * KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
708 * for RGB conversion from the one purely determined by the primaries
709 * to minimize the color shift into RGB space that uses BT.709
712 STANDARD_BT601_525 = 4 << STANDARD_SHIFT,
719 * white (D65) 0.3127 0.3290
721 * Use the unadjusted KR = 0.212, KB = 0.087 luminance interpretation
722 * for RGB conversion (as in SMPTE 240M).
724 STANDARD_BT601_525_UNADJUSTED = 5 << STANDARD_SHIFT,
731 * white (D65) 0.3127 0.3290
733 * Use the unadjusted KR = 0.2627, KB = 0.0593 luminance interpretation
734 * for RGB conversion.
736 STANDARD_BT2020 = 6 << STANDARD_SHIFT,
743 * white (D65) 0.3127 0.3290
745 * Use the unadjusted KR = 0.2627, KB = 0.0593 luminance interpretation
746 * for RGB conversion using the linear domain.
748 STANDARD_BT2020_CONSTANT_LUMINANCE = 7 << STANDARD_SHIFT,
755 * white (C) 0.310 0.316
757 * Use the unadjusted KR = 0.30, KB = 0.11 luminance interpretation
758 * for RGB conversion.
760 STANDARD_BT470M = 8 << STANDARD_SHIFT,
767 * white (C) 0.310 0.316
769 * Use the unadjusted KR = 0.254, KB = 0.068 luminance interpretation
770 * for RGB conversion.
772 STANDARD_FILM = 9 << STANDARD_SHIFT,
775 * SMPTE EG 432-1 and SMPTE RP 431-2. (DCI-P3)
780 * white (D65) 0.3127 0.3290
782 STANDARD_DCI_P3 = 10 << STANDARD_SHIFT,
790 * white (D65) 0.3127 0.3290
792 STANDARD_ADOBE_RGB = 11 << STANDARD_SHIFT,
801 * Transfer characteristics are the opto-electronic transfer characteristic
802 * at the source as a function of linear optical intensity (luminance).
804 * For digital signals, E corresponds to the recorded value. Normally, the
805 * transfer function is applied in RGB space to each of the R, G and B
806 * components independently. This may result in color shift that can be
807 * minimized by applying the transfer function in Lab space only for the L
808 * component. Implementation may apply the transfer function in RGB space
809 * for all pixel formats if desired.
812 TRANSFER_MASK = 31 << TRANSFER_SHIFT, // 0x1F
815 * Transfer characteristics are unknown or are determined by the
818 * Implementations should use the following transfer functions:
820 * For YCbCr formats: use TRANSFER_SMPTE_170M
821 * For RGB formats: use TRANSFER_SRGB
823 * For all other formats transfer function is undefined, and implementations
824 * should use an appropriate standard for the data represented.
826 TRANSFER_UNSPECIFIED = 0 << TRANSFER_SHIFT,
829 * Transfer characteristic curve:
831 * L - luminance of image 0 <= L <= 1 for conventional colorimetry
832 * E - corresponding electrical signal
834 TRANSFER_LINEAR = 1 << TRANSFER_SHIFT,
837 * Transfer characteristic curve:
839 * E = 1.055 * L^(1/2.4) - 0.055 for 0.0031308 <= L <= 1
840 * = 12.92 * L for 0 <= L < 0.0031308
841 * L - luminance of image 0 <= L <= 1 for conventional colorimetry
842 * E - corresponding electrical signal
844 TRANSFER_SRGB = 2 << TRANSFER_SHIFT,
847 * BT.601 525, BT.601 625, BT.709, BT.2020
849 * Transfer characteristic curve:
850 * E = 1.099 * L ^ 0.45 - 0.099 for 0.018 <= L <= 1
851 * = 4.500 * L for 0 <= L < 0.018
852 * L - luminance of image 0 <= L <= 1 for conventional colorimetry
853 * E - corresponding electrical signal
855 TRANSFER_SMPTE_170M = 3 << TRANSFER_SHIFT,
858 * Assumed display gamma 2.2.
860 * Transfer characteristic curve:
862 * L - luminance of image 0 <= L <= 1 for conventional colorimetry
863 * E - corresponding electrical signal
865 TRANSFER_GAMMA2_2 = 4 << TRANSFER_SHIFT,
870 * Transfer characteristic curve:
872 * L - luminance of image 0 <= L <= 1 for conventional colorimetry
873 * E - corresponding electrical signal
875 TRANSFER_GAMMA2_6 = 5 << TRANSFER_SHIFT,
880 * Transfer characteristic curve:
882 * L - luminance of image 0 <= L <= 1 for conventional colorimetry
883 * E - corresponding electrical signal
885 TRANSFER_GAMMA2_8 = 6 << TRANSFER_SHIFT,
888 * SMPTE ST 2084 (Dolby Perceptual Quantizer)
890 * Transfer characteristic curve:
891 * E = ((c1 + c2 * L^n) / (1 + c3 * L^n)) ^ m
892 * c1 = c3 - c2 + 1 = 3424 / 4096 = 0.8359375
893 * c2 = 32 * 2413 / 4096 = 18.8515625
894 * c3 = 32 * 2392 / 4096 = 18.6875
895 * m = 128 * 2523 / 4096 = 78.84375
896 * n = 0.25 * 2610 / 4096 = 0.1593017578125
897 * L - luminance of image 0 <= L <= 1 for HDR colorimetry.
898 * L = 1 corresponds to 10000 cd/m2
899 * E - corresponding electrical signal
901 TRANSFER_ST2084 = 7 << TRANSFER_SHIFT,
904 * ARIB STD-B67 Hybrid Log Gamma
906 * Transfer characteristic curve:
907 * E = r * L^0.5 for 0 <= L <= 1
908 * = a * ln(L - b) + c for 1 < L
913 * L - luminance of image 0 <= L for HDR colorimetry. L = 1 corresponds
914 * to reference white level of 100 cd/m2
915 * E - corresponding electrical signal
917 TRANSFER_HLG = 8 << TRANSFER_SHIFT,
924 * Defines the range of values corresponding to the unit range of 0-1.
925 * This is defined for YCbCr only, but can be expanded to RGB space.
927 RANGE_MASK = 7 << RANGE_SHIFT, // 0x7
930 * Range is unknown or are determined by the application. Implementations
931 * shall use the following suggested ranges:
933 * All YCbCr formats: limited range.
934 * All RGB or RGBA formats (including RAW and Bayer): full range.
935 * All Y formats: full range
937 * For all other formats range is undefined, and implementations should use
938 * an appropriate range for the data represented.
940 RANGE_UNSPECIFIED = 0 << RANGE_SHIFT,
943 * Full range uses all values for Y, Cb and Cr from
944 * 0 to 2^b-1, where b is the bit depth of the color format.
946 RANGE_FULL = 1 << RANGE_SHIFT,
949 * Limited range uses values 16/256*2^b to 235/256*2^b for Y, and
950 * 1/16*2^b to 15/16*2^b for Cb, Cr, R, G and B, where b is the bit depth of
953 * E.g. For 8-bit-depth formats:
954 * Luma (Y) samples should range from 16 to 235, inclusive
955 * Chroma (Cb, Cr) samples should range from 16 to 240, inclusive
957 * For 10-bit-depth formats:
958 * Luma (Y) samples should range from 64 to 940, inclusive
959 * Chroma (Cb, Cr) samples should range from 64 to 960, inclusive
961 RANGE_LIMITED = 2 << RANGE_SHIFT,
964 * Extended range is used for scRGB. Intended for use with
965 * floating point pixel formats. [0.0 - 1.0] is the standard
966 * sRGB space. Values outside the range 0.0 - 1.0 can encode
967 * color outside the sRGB gamut.
968 * Used to blend / merge multiple dataspaces on a single display.
970 RANGE_EXTENDED = 3 << RANGE_SHIFT,
977 * sRGB linear encoding:
979 * The red, green, and blue components are stored in sRGB space, but
980 * are linear, not gamma-encoded.
981 * The RGB primaries and the white point are the same as BT.709.
983 * The values are encoded using the full range ([0,255] for 8-bit) for all
986 SRGB_LINEAR = 0x200, // deprecated, use V0_SRGB_LINEAR
988 V0_SRGB_LINEAR = STANDARD_BT709 | TRANSFER_LINEAR | RANGE_FULL,
992 * scRGB linear encoding:
994 * The red, green, and blue components are stored in extended sRGB space,
995 * but are linear, not gamma-encoded.
996 * The RGB primaries and the white point are the same as BT.709.
998 * The values are floating point.
999 * A pixel value of 1.0, 1.0, 1.0 corresponds to sRGB white (D65) at 80 nits.
1000 * Values beyond the range [0.0 - 1.0] would correspond to other color
1001 * spaces and/or HDR content.
1003 V0_SCRGB_LINEAR = STANDARD_BT709 | TRANSFER_LINEAR | RANGE_EXTENDED,
1007 * sRGB gamma encoding:
1009 * The red, green and blue components are stored in sRGB space, and
1010 * converted to linear space when read, using the SRGB transfer function
1011 * for each of the R, G and B components. When written, the inverse
1012 * transformation is performed.
1014 * The alpha component, if present, is always stored in linear space and
1015 * is left unmodified when read or written.
1017 * Use full range and BT.709 standard.
1019 SRGB = 0x201, // deprecated, use V0_SRGB
1021 V0_SRGB = STANDARD_BT709 | TRANSFER_SRGB | RANGE_FULL,
1027 * The red, green, and blue components are stored in extended sRGB space,
1028 * but are linear, not gamma-encoded.
1029 * The RGB primaries and the white point are the same as BT.709.
1031 * The values are floating point.
1032 * A pixel value of 1.0, 1.0, 1.0 corresponds to sRGB white (D65) at 80 nits.
1033 * Values beyond the range [0.0 - 1.0] would correspond to other color
1034 * spaces and/or HDR content.
1036 V0_SCRGB = STANDARD_BT709 | TRANSFER_SRGB | RANGE_EXTENDED,
1042 * Primaries are given using (x,y) coordinates in the CIE 1931 definition
1043 * of x and y specified by ISO 11664-1.
1045 * Transfer characteristics are the opto-electronic transfer characteristic
1046 * at the source as a function of linear optical intensity (luminance).
1050 * JPEG File Interchange Format (JFIF)
1052 * Same model as BT.601-625, but all values (Y, Cb, Cr) range from 0 to 255
1054 * Use full range, BT.601 transfer and BT.601_625 standard.
1056 JFIF = 0x101, // deprecated, use V0_JFIF
1058 V0_JFIF = STANDARD_BT601_625 | TRANSFER_SMPTE_170M | RANGE_FULL,
1061 * ITU-R Recommendation 601 (BT.601) - 625-line
1063 * Standard-definition television, 625 Lines (PAL)
1065 * Use limited range, BT.601 transfer and BT.601_625 standard.
1067 BT601_625 = 0x102, // deprecated, use V0_BT601_625
1069 V0_BT601_625 = STANDARD_BT601_625 | TRANSFER_SMPTE_170M | RANGE_LIMITED,
1073 * ITU-R Recommendation 601 (BT.601) - 525-line
1075 * Standard-definition television, 525 Lines (NTSC)
1077 * Use limited range, BT.601 transfer and BT.601_525 standard.
1079 BT601_525 = 0x103, // deprecated, use V0_BT601_525
1081 V0_BT601_525 = STANDARD_BT601_525 | TRANSFER_SMPTE_170M | RANGE_LIMITED,
1084 * ITU-R Recommendation 709 (BT.709)
1086 * High-definition television
1088 * Use limited range, BT.709 transfer and BT.709 standard.
1090 BT709 = 0x104, // deprecated, use V0_BT709
1092 V0_BT709 = STANDARD_BT709 | TRANSFER_SMPTE_170M | RANGE_LIMITED,
1096 * SMPTE EG 432-1 and SMPTE RP 431-2.
1098 * Digital Cinema DCI-P3
1100 * Use full range, linear transfer and D65 DCI-P3 standard
1102 DCI_P3_LINEAR = STANDARD_DCI_P3 | TRANSFER_LINEAR | RANGE_FULL,
1106 * SMPTE EG 432-1 and SMPTE RP 431-2.
1108 * Digital Cinema DCI-P3
1110 * Use full range, gamma 2.6 transfer and D65 DCI-P3 standard
1111 * Note: Application is responsible for gamma encoding the data as
1112 * a 2.6 gamma encoding is not supported in HW.
1114 DCI_P3 = STANDARD_DCI_P3 | TRANSFER_GAMMA2_6 | RANGE_FULL,
1120 * Display P3 uses the same primaries and white-point as DCI-P3;
1121 * the linear transfer function makes this the same as DCI_P3_LINEAR.
1123 DISPLAY_P3_LINEAR = STANDARD_DCI_P3 | TRANSFER_LINEAR | RANGE_FULL,
1129 * Use same primaries and white-point as DCI-P3
1130 * but sRGB transfer function.
1132 DISPLAY_P3 = STANDARD_DCI_P3 | TRANSFER_SRGB | RANGE_FULL,
1138 * Use full range, gamma 2.2 transfer and Adobe RGB primaries
1139 * Note: Application is responsible for gamma encoding the data as
1140 * a 2.2 gamma encoding is not supported in HW.
1142 ADOBE_RGB = STANDARD_ADOBE_RGB | TRANSFER_GAMMA2_2 | RANGE_FULL,
1146 * ITU-R Recommendation 2020 (BT.2020)
1148 * Ultra High-definition television
1150 * Use full range, linear transfer and BT2020 standard
1152 BT2020_LINEAR = STANDARD_BT2020 | TRANSFER_LINEAR | RANGE_FULL,
1156 * ITU-R Recommendation 2020 (BT.2020)
1158 * Ultra High-definition television
1160 * Use full range, BT.709 transfer and BT2020 standard
1162 BT2020 = STANDARD_BT2020 | TRANSFER_SMPTE_170M | RANGE_FULL,
1165 * ITU-R Recommendation 2020 (BT.2020)
1167 * Ultra High-definition television
1169 * Use full range, SMPTE 2084 (PQ) transfer and BT2020 standard
1171 BT2020_PQ = STANDARD_BT2020 | TRANSFER_ST2084 | RANGE_FULL,
1175 * Data spaces for non-color formats
1179 * The buffer contains depth ranging measurements from a depth camera.
1180 * This value is valid with formats:
1181 * HAL_PIXEL_FORMAT_Y16: 16-bit samples, consisting of a depth measurement
1182 * and an associated confidence value. The 3 MSBs of the sample make
1183 * up the confidence value, and the low 13 LSBs of the sample make up
1184 * the depth measurement.
1185 * For the confidence section, 0 means 100% confidence, 1 means 0%
1186 * confidence. The mapping to a linear float confidence value between
1187 * 0.f and 1.f can be obtained with
1188 * float confidence = (((depthSample >> 13) - 1) & 0x7) / 7.0f;
1189 * The depth measurement can be extracted simply with
1190 * uint16_t range = (depthSample & 0x1FFF);
1191 * HAL_PIXEL_FORMAT_BLOB: A depth point cloud, as
1192 * a variable-length float (x,y,z, confidence) coordinate point list.
1193 * The point cloud will be represented with the android_depth_points
1200 * The buffer contains sensor events from sensor direct report.
1201 * This value is valid with formats:
1202 * HAL_PIXEL_FORMAT_BLOB: an array of sensor event structure that forms
1203 * a lock free queue. Format of sensor event structure is specified
1210 * Color modes that may be supported by a display.
1213 * Rendering intent generally defines the goal in mapping a source (input)
1214 * color to a destination device color for a given color mode.
1216 * It is important to keep in mind three cases where mapping may be applied:
1217 * 1. The source gamut is much smaller than the destination (display) gamut
1218 * 2. The source gamut is much larger than the destination gamut (this will
1219 * ordinarily be handled using colorimetric rendering, below)
1220 * 3. The source and destination gamuts are roughly equal, although not
1221 * completely overlapping
1222 * Also, a common requirement for mappings is that skin tones should be
1223 * preserved, or at least remain natural in appearance.
1225 * Colorimetric Rendering Intent (All cases):
1226 * Colorimetric indicates that colors should be preserved. In the case
1227 * that the source gamut lies wholly within the destination gamut or is
1228 * about the same (#1, #3), this will simply mean that no manipulations
1229 * (no saturation boost, for example) are applied. In the case where some
1230 * source colors lie outside the destination gamut (#2, #3), those will
1231 * need to be mapped to colors that are within the destination gamut,
1232 * while the already in-gamut colors remain unchanged.
1234 * Non-colorimetric transforms can take many forms. There are no hard
1235 * rules and it's left to the implementation to define.
1236 * Two common intents are described below.
1238 * Stretched-Gamut Enhancement Intent (Source < Destination):
1239 * When the destination gamut is much larger than the source gamut (#1), the
1240 * source primaries may be redefined to reflect the full extent of the
1241 * destination space, or to reflect an intermediate gamut.
1242 * Skin-tone preservation would likely be applied. An example might be sRGB
1243 * input displayed on a DCI-P3 capable device, with skin-tone preservation.
1245 * Within-Gamut Enhancement Intent (Source >= Destination):
1246 * When the device (destination) gamut is not larger than the source gamut
1247 * (#2 or #3), but the appearance of a larger gamut is desired, techniques
1248 * such as saturation boost may be applied to the source colors. Skin-tone
1249 * preservation may be applied. There is no unique method for within-gamut
1250 * enhancement; it would be defined within a flexible color mode.
1253 @export(name="android_color_mode_t", value_prefix="HAL_COLOR_MODE_")
1254 enum ColorMode : int32_t {
1256 * DEFAULT is the "native" gamut of the display.
1257 * White Point: Vendor/OEM defined
1258 * Panel Gamma: Vendor/OEM defined (typically 2.2)
1259 * Rendering Intent: Vendor/OEM defined (typically 'enhanced')
1264 * STANDARD_BT601_625 corresponds with display
1265 * settings that implement the ITU-R Recommendation BT.601
1266 * or Rec. 601, using the 625-line version.
1267 * Rendering Intent: Colorimetric
1273 * white (D65) 0.3127 0.3290
1275 * KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
1276 * for RGB conversion from the one purely determined by the primaries
1277 * to minimize the color shift into RGB space that uses BT.709
1280 * Gamma Correction (GC):
1282 * if Vlinear < 0.018
1283 * Vnonlinear = 4.500 * Vlinear
1285 * Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
1287 STANDARD_BT601_625 = 1,
1295 * white (D65) 0.3127 0.3290
1297 * Use the unadjusted KR = 0.222, KB = 0.071 luminance interpretation
1298 * for RGB conversion.
1300 * Gamma Correction (GC):
1302 * if Vlinear < 0.018
1303 * Vnonlinear = 4.500 * Vlinear
1305 * Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
1307 STANDARD_BT601_625_UNADJUSTED = 2,
1315 * white (D65) 0.3127 0.3290
1317 * KR = 0.299, KB = 0.114. This adjusts the luminance interpretation
1318 * for RGB conversion from the one purely determined by the primaries
1319 * to minimize the color shift into RGB space that uses BT.709
1322 * Gamma Correction (GC):
1324 * if Vlinear < 0.018
1325 * Vnonlinear = 4.500 * Vlinear
1327 * Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
1329 STANDARD_BT601_525 = 3,
1337 * white (D65) 0.3127 0.3290
1339 * Use the unadjusted KR = 0.212, KB = 0.087 luminance interpretation
1340 * for RGB conversion (as in SMPTE 240M).
1342 * Gamma Correction (GC):
1344 * if Vlinear < 0.018
1345 * Vnonlinear = 4.500 * Vlinear
1347 * Vnonlinear = 1.099 * (Vlinear)^(0.45) - 0.099
1349 STANDARD_BT601_525_UNADJUSTED = 4,
1352 * REC709 corresponds with display settings that implement
1353 * the ITU-R Recommendation BT.709 / Rec. 709 for high-definition television.
1354 * Rendering Intent: Colorimetric
1360 * white (D65) 0.3127 0.3290
1362 * HDTV REC709 Inverse Gamma Correction (IGC): V represents normalized
1363 * (with [0 to 1] range) value of R, G, or B.
1365 * if Vnonlinear < 0.081
1366 * Vlinear = Vnonlinear / 4.5
1368 * Vlinear = ((Vnonlinear + 0.099) / 1.099) ^ (1/0.45)
1370 * HDTV REC709 Gamma Correction (GC):
1372 * if Vlinear < 0.018
1373 * Vnonlinear = 4.5 * Vlinear
1375 * Vnonlinear = 1.099 * (Vlinear) ^ 0.45 - 0.099
1380 * DCI_P3 corresponds with display settings that implement
1381 * SMPTE EG 432-1 and SMPTE RP 431-2
1382 * Rendering Intent: Colorimetric
1388 * white (D65) 0.3127 0.3290
1395 * SRGB corresponds with display settings that implement
1396 * the sRGB color space. Uses the same primaries as ITU-R Recommendation
1398 * Rendering Intent: Colorimetric
1404 * white (D65) 0.3127 0.3290
1406 * PC/Internet (sRGB) Inverse Gamma Correction (IGC):
1408 * if Vnonlinear ≤ 0.03928
1409 * Vlinear = Vnonlinear / 12.92
1411 * Vlinear = ((Vnonlinear + 0.055)/1.055) ^ 2.4
1413 * PC/Internet (sRGB) Gamma Correction (GC):
1415 * if Vlinear ≤ 0.0031308
1416 * Vnonlinear = 12.92 * Vlinear
1418 * Vnonlinear = 1.055 * (Vlinear)^(1/2.4) - 0.055
1423 * ADOBE_RGB corresponds with the RGB color space developed
1424 * by Adobe Systems, Inc. in 1998.
1425 * Rendering Intent: Colorimetric
1431 * white (D65) 0.3127 0.3290
1438 * DISPLAY_P3 is a color space that uses the DCI_P3 primaries,
1439 * the D65 white point and the SRGB transfer functions.
1440 * Rendering Intent: Colorimetric
1446 * white (D65) 0.3127 0.3290
1448 * PC/Internet (sRGB) Gamma Correction (GC):
1450 * if Vlinear ≤ 0.0030186
1451 * Vnonlinear = 12.92 * Vlinear
1453 * Vnonlinear = 1.055 * (Vlinear)^(1/2.4) - 0.055
1455 * Note: In most cases sRGB transfer function will be fine.
1461 * Color transforms that may be applied by hardware composer to the whole
1464 @export(name="android_color_transform_t", value_prefix="HAL_COLOR_TRANSFORM_")
1465 enum ColorTransform : int32_t {
1466 /** Applies no transform to the output color */
1469 /** Applies an arbitrary transform defined by a 4x4 affine matrix */
1470 ARBITRARY_MATRIX = 1,
1473 * Applies a transform that inverts the value or luminance of the color, but
1474 * does not modify hue or saturation */
1477 /** Applies a transform that maps all colors to shades of gray */
1480 /** Applies a transform which corrects for protanopic color blindness */
1481 CORRECT_PROTANOPIA = 4,
1483 /** Applies a transform which corrects for deuteranopic color blindness */
1484 CORRECT_DEUTERANOPIA = 5,
1486 /** Applies a transform which corrects for tritanopic color blindness */
1487 CORRECT_TRITANOPIA = 6
1491 * Supported HDR formats. Must be kept in sync with equivalents in Display.java.
1493 @export(name="android_hdr_t", value_prefix="HAL_HDR_")
1494 enum Hdr : int32_t {
1495 /** Device supports Dolby Vision HDR */
1498 /** Device supports HDR10 */
1501 /** Device supports hybrid log-gamma HDR */