ASR_BASE
Change-Id: Icf3719cc0afe3eeb3edc7fa80a2eb5199ca9dda1
diff --git a/package/utils/adbd/src/include/system/audio.h b/package/utils/adbd/src/include/system/audio.h
new file mode 100644
index 0000000..b1827b0
--- /dev/null
+++ b/package/utils/adbd/src/include/system/audio.h
@@ -0,0 +1,1059 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_AUDIO_CORE_H
+#define ANDROID_AUDIO_CORE_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <cutils/bitops.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* The enums were moved here mostly from
+ * frameworks/base/include/media/AudioSystem.h
+ */
+
+/* device address used to refer to the standard remote submix */
+#define AUDIO_REMOTE_SUBMIX_DEVICE_ADDRESS "0"
+
+/* AudioFlinger and AudioPolicy services use I/O handles to identify audio sources and sinks */
+typedef int audio_io_handle_t;
+#define AUDIO_IO_HANDLE_NONE 0
+
+/* Audio stream types */
+typedef enum {
+ /* These values must be kept in sync with
+ * frameworks/base/media/java/android/media/AudioSystem.java
+ */
+ AUDIO_STREAM_DEFAULT = -1,
+ AUDIO_STREAM_MIN = 0,
+ AUDIO_STREAM_VOICE_CALL = 0,
+ AUDIO_STREAM_SYSTEM = 1,
+ AUDIO_STREAM_RING = 2,
+ AUDIO_STREAM_MUSIC = 3,
+ AUDIO_STREAM_ALARM = 4,
+ AUDIO_STREAM_NOTIFICATION = 5,
+ AUDIO_STREAM_BLUETOOTH_SCO = 6,
+ AUDIO_STREAM_ENFORCED_AUDIBLE = 7, /* Sounds that cannot be muted by the user and must be routed to the speaker */
+ AUDIO_STREAM_DTMF = 8,
+ AUDIO_STREAM_TTS = 9,
+
+ AUDIO_STREAM_CNT,
+ AUDIO_STREAM_MAX = AUDIO_STREAM_CNT - 1,
+} audio_stream_type_t;
+
+/* Do not change these values without updating their counterparts
+ * in frameworks/base/media/java/android/media/AudioAttributes.java
+ */
+typedef enum {
+ AUDIO_CONTENT_TYPE_UNKNOWN = 0,
+ AUDIO_CONTENT_TYPE_SPEECH = 1,
+ AUDIO_CONTENT_TYPE_MUSIC = 2,
+ AUDIO_CONTENT_TYPE_MOVIE = 3,
+ AUDIO_CONTENT_TYPE_SONIFICATION = 4,
+
+ AUDIO_CONTENT_TYPE_CNT,
+ AUDIO_CONTENT_TYPE_MAX = AUDIO_CONTENT_TYPE_CNT - 1,
+} audio_content_type_t;
+
+/* Do not change these values without updating their counterparts
+ * in frameworks/base/media/java/android/media/AudioAttributes.java
+ */
+typedef enum {
+ AUDIO_USAGE_UNKNOWN = 0,
+ AUDIO_USAGE_MEDIA = 1,
+ AUDIO_USAGE_VOICE_COMMUNICATION = 2,
+ AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING = 3,
+ AUDIO_USAGE_ALARM = 4,
+ AUDIO_USAGE_NOTIFICATION = 5,
+ AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE = 6,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST = 7,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT = 8,
+ AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED = 9,
+ AUDIO_USAGE_NOTIFICATION_EVENT = 10,
+ AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY = 11,
+ AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE = 12,
+ AUDIO_USAGE_ASSISTANCE_SONIFICATION = 13,
+ AUDIO_USAGE_GAME = 14,
+
+ AUDIO_USAGE_CNT,
+ AUDIO_USAGE_MAX = AUDIO_USAGE_CNT - 1,
+} audio_usage_t;
+
+typedef uint32_t audio_flags_mask_t;
+
+/* Do not change these values without updating their counterparts
+ * in frameworks/base/media/java/android/media/AudioAttributes.java
+ */
+enum {
+ AUDIO_FLAG_AUDIBILITY_ENFORCED = 0x1,
+ AUDIO_FLAG_SECURE = 0x2,
+ AUDIO_FLAG_SCO = 0x4,
+};
+
+/* Do not change these values without updating their counterparts
+ * in frameworks/base/media/java/android/media/MediaRecorder.java,
+ * frameworks/av/services/audiopolicy/AudioPolicyService.cpp,
+ * and system/media/audio_effects/include/audio_effects/audio_effects_conf.h!
+ */
+typedef enum {
+ AUDIO_SOURCE_DEFAULT = 0,
+ AUDIO_SOURCE_MIC = 1,
+ AUDIO_SOURCE_VOICE_UPLINK = 2,
+ AUDIO_SOURCE_VOICE_DOWNLINK = 3,
+ AUDIO_SOURCE_VOICE_CALL = 4,
+ AUDIO_SOURCE_CAMCORDER = 5,
+ AUDIO_SOURCE_VOICE_RECOGNITION = 6,
+ AUDIO_SOURCE_VOICE_COMMUNICATION = 7,
+ AUDIO_SOURCE_REMOTE_SUBMIX = 8, /* Source for the mix to be presented remotely. */
+ /* An example of remote presentation is Wifi Display */
+ /* where a dongle attached to a TV can be used to */
+ /* play the mix captured by this audio source. */
+ AUDIO_SOURCE_CNT,
+ AUDIO_SOURCE_MAX = AUDIO_SOURCE_CNT - 1,
+ AUDIO_SOURCE_HOTWORD = 1999, /* A low-priority, preemptible audio source
+ for background software hotword detection.
+ Same tuning as AUDIO_SOURCE_VOICE_RECOGNITION.
+ Used only internally to the framework. Not exposed
+ at the audio HAL. */
+} audio_source_t;
+
+/* Audio attributes */
+#define AUDIO_ATTRIBUTES_TAGS_MAX_SIZE 256
+typedef struct {
+ audio_content_type_t content_type;
+ audio_usage_t usage;
+ audio_source_t source;
+ audio_flags_mask_t flags;
+ char tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE]; /* UTF8 */
+} audio_attributes_t;
+
+/* special audio session values
+ * (XXX: should this be living in the audio effects land?)
+ */
+typedef enum {
+ /* session for effects attached to a particular output stream
+ * (value must be less than 0)
+ */
+ AUDIO_SESSION_OUTPUT_STAGE = -1,
+
+ /* session for effects applied to output mix. These effects can
+ * be moved by audio policy manager to another output stream
+ * (value must be 0)
+ */
+ AUDIO_SESSION_OUTPUT_MIX = 0,
+
+ /* application does not specify an explicit session ID to be used,
+ * and requests a new session ID to be allocated
+ * TODO use unique values for AUDIO_SESSION_OUTPUT_MIX and AUDIO_SESSION_ALLOCATE,
+ * after all uses have been updated from 0 to the appropriate symbol, and have been tested.
+ */
+ AUDIO_SESSION_ALLOCATE = 0,
+} audio_session_t;
+
+/* Audio sub formats (see enum audio_format). */
+
+/* PCM sub formats */
+typedef enum {
+ /* All of these are in native byte order */
+ AUDIO_FORMAT_PCM_SUB_16_BIT = 0x1, /* DO NOT CHANGE - PCM signed 16 bits */
+ AUDIO_FORMAT_PCM_SUB_8_BIT = 0x2, /* DO NOT CHANGE - PCM unsigned 8 bits */
+ AUDIO_FORMAT_PCM_SUB_32_BIT = 0x3, /* PCM signed .31 fixed point */
+ AUDIO_FORMAT_PCM_SUB_8_24_BIT = 0x4, /* PCM signed 7.24 fixed point */
+ AUDIO_FORMAT_PCM_SUB_FLOAT = 0x5, /* PCM single-precision floating point */
+ AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED = 0x6, /* PCM signed .23 fixed point packed in 3 bytes */
+} audio_format_pcm_sub_fmt_t;
+
+/* The audio_format_*_sub_fmt_t declarations are not currently used */
+
+/* MP3 sub format field definition : can use 11 LSBs in the same way as MP3
+ * frame header to specify bit rate, stereo mode, version...
+ */
+typedef enum {
+ AUDIO_FORMAT_MP3_SUB_NONE = 0x0,
+} audio_format_mp3_sub_fmt_t;
+
+/* AMR NB/WB sub format field definition: specify frame block interleaving,
+ * bandwidth efficient or octet aligned, encoding mode for recording...
+ */
+typedef enum {
+ AUDIO_FORMAT_AMR_SUB_NONE = 0x0,
+} audio_format_amr_sub_fmt_t;
+
+/* AAC sub format field definition: specify profile or bitrate for recording... */
+typedef enum {
+ AUDIO_FORMAT_AAC_SUB_NONE = 0x0,
+} audio_format_aac_sub_fmt_t;
+
+/* VORBIS sub format field definition: specify quality for recording... */
+typedef enum {
+ AUDIO_FORMAT_VORBIS_SUB_NONE = 0x0,
+} audio_format_vorbis_sub_fmt_t;
+
+/* Audio format consists of a main format field (upper 8 bits) and a sub format
+ * field (lower 24 bits).
+ *
+ * The main format indicates the main codec type. The sub format field
+ * indicates options and parameters for each format. The sub format is mainly
+ * used for record to indicate for instance the requested bitrate or profile.
+ * It can also be used for certain formats to convey information not present in
+ * the encoded audio stream (e.g. octet alignment for AMR).
+ */
+typedef enum {
+ AUDIO_FORMAT_INVALID = 0xFFFFFFFFUL,
+ AUDIO_FORMAT_DEFAULT = 0,
+ AUDIO_FORMAT_PCM = 0x00000000UL, /* DO NOT CHANGE */
+ AUDIO_FORMAT_MP3 = 0x01000000UL,
+ AUDIO_FORMAT_AMR_NB = 0x02000000UL,
+ AUDIO_FORMAT_AMR_WB = 0x03000000UL,
+ AUDIO_FORMAT_AAC = 0x04000000UL,
+ AUDIO_FORMAT_HE_AAC_V1 = 0x05000000UL,
+ AUDIO_FORMAT_HE_AAC_V2 = 0x06000000UL,
+ AUDIO_FORMAT_VORBIS = 0x07000000UL,
+ AUDIO_FORMAT_OPUS = 0x08000000UL,
+ AUDIO_FORMAT_AC3 = 0x09000000UL,
+ AUDIO_FORMAT_E_AC3 = 0x0A000000UL,
+ AUDIO_FORMAT_MAIN_MASK = 0xFF000000UL,
+ AUDIO_FORMAT_SUB_MASK = 0x00FFFFFFUL,
+
+ /* Aliases */
+ /* note != AudioFormat.ENCODING_PCM_16BIT */
+ AUDIO_FORMAT_PCM_16_BIT = (AUDIO_FORMAT_PCM |
+ AUDIO_FORMAT_PCM_SUB_16_BIT),
+ /* note != AudioFormat.ENCODING_PCM_8BIT */
+ AUDIO_FORMAT_PCM_8_BIT = (AUDIO_FORMAT_PCM |
+ AUDIO_FORMAT_PCM_SUB_8_BIT),
+ AUDIO_FORMAT_PCM_32_BIT = (AUDIO_FORMAT_PCM |
+ AUDIO_FORMAT_PCM_SUB_32_BIT),
+ AUDIO_FORMAT_PCM_8_24_BIT = (AUDIO_FORMAT_PCM |
+ AUDIO_FORMAT_PCM_SUB_8_24_BIT),
+ AUDIO_FORMAT_PCM_FLOAT = (AUDIO_FORMAT_PCM |
+ AUDIO_FORMAT_PCM_SUB_FLOAT),
+ AUDIO_FORMAT_PCM_24_BIT_PACKED = (AUDIO_FORMAT_PCM |
+ AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED),
+} audio_format_t;
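+
+/* Illustrative sketch (not part of the upstream header): how the main/sub
+ * masks above are meant to split a format value. For example,
+ * AUDIO_FORMAT_PCM_16_BIT has main format AUDIO_FORMAT_PCM and sub format
+ * AUDIO_FORMAT_PCM_SUB_16_BIT. The helper names are illustrative only. */
+static inline uint32_t audio_example_main_format(audio_format_t format)
+{
+    /* upper 8 bits select the codec family */
+    return (uint32_t)format & AUDIO_FORMAT_MAIN_MASK;
+}
+
+static inline uint32_t audio_example_sub_format(audio_format_t format)
+{
+    /* lower 24 bits carry codec-specific options */
+    return (uint32_t)format & AUDIO_FORMAT_SUB_MASK;
+}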
+
+enum {
+ AUDIO_CHANNEL_NONE = 0x0,
+ /* output channels */
+ AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1,
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2,
+ AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4,
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8,
+ AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10,
+ AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20,
+ AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40,
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80,
+ AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100,
+ AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200,
+ AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400,
+ AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800,
+ AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000,
+ AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000,
+ AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000,
+ AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000,
+ AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000,
+ AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000,
+
+ AUDIO_CHANNEL_OUT_MONO = AUDIO_CHANNEL_OUT_FRONT_LEFT,
+ AUDIO_CHANNEL_OUT_STEREO = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT),
+ AUDIO_CHANNEL_OUT_QUAD = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT |
+ AUDIO_CHANNEL_OUT_BACK_LEFT |
+ AUDIO_CHANNEL_OUT_BACK_RIGHT),
+ AUDIO_CHANNEL_OUT_QUAD_BACK = AUDIO_CHANNEL_OUT_QUAD,
+ /* like AUDIO_CHANNEL_OUT_QUAD_BACK with *_SIDE_* instead of *_BACK_* */
+ AUDIO_CHANNEL_OUT_QUAD_SIDE = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT |
+ AUDIO_CHANNEL_OUT_SIDE_LEFT |
+ AUDIO_CHANNEL_OUT_SIDE_RIGHT),
+ AUDIO_CHANNEL_OUT_5POINT1 = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT |
+ AUDIO_CHANNEL_OUT_FRONT_CENTER |
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+ AUDIO_CHANNEL_OUT_BACK_LEFT |
+ AUDIO_CHANNEL_OUT_BACK_RIGHT),
+ AUDIO_CHANNEL_OUT_5POINT1_BACK = AUDIO_CHANNEL_OUT_5POINT1,
+ /* like AUDIO_CHANNEL_OUT_5POINT1_BACK with *_SIDE_* instead of *_BACK_* */
+ AUDIO_CHANNEL_OUT_5POINT1_SIDE = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT |
+ AUDIO_CHANNEL_OUT_FRONT_CENTER |
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+ AUDIO_CHANNEL_OUT_SIDE_LEFT |
+ AUDIO_CHANNEL_OUT_SIDE_RIGHT),
+ // matches the correct AudioFormat.CHANNEL_OUT_7POINT1_SURROUND definition for 7.1
+ AUDIO_CHANNEL_OUT_7POINT1 = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT |
+ AUDIO_CHANNEL_OUT_FRONT_CENTER |
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+ AUDIO_CHANNEL_OUT_BACK_LEFT |
+ AUDIO_CHANNEL_OUT_BACK_RIGHT |
+ AUDIO_CHANNEL_OUT_SIDE_LEFT |
+ AUDIO_CHANNEL_OUT_SIDE_RIGHT),
+ AUDIO_CHANNEL_OUT_ALL = (AUDIO_CHANNEL_OUT_FRONT_LEFT |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT |
+ AUDIO_CHANNEL_OUT_FRONT_CENTER |
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
+ AUDIO_CHANNEL_OUT_BACK_LEFT |
+ AUDIO_CHANNEL_OUT_BACK_RIGHT |
+ AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER |
+ AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER |
+ AUDIO_CHANNEL_OUT_BACK_CENTER|
+ AUDIO_CHANNEL_OUT_SIDE_LEFT|
+ AUDIO_CHANNEL_OUT_SIDE_RIGHT|
+ AUDIO_CHANNEL_OUT_TOP_CENTER|
+ AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT|
+ AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER|
+ AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT|
+ AUDIO_CHANNEL_OUT_TOP_BACK_LEFT|
+ AUDIO_CHANNEL_OUT_TOP_BACK_CENTER|
+ AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT),
+
+ /* input channels */
+ AUDIO_CHANNEL_IN_LEFT = 0x4,
+ AUDIO_CHANNEL_IN_RIGHT = 0x8,
+ AUDIO_CHANNEL_IN_FRONT = 0x10,
+ AUDIO_CHANNEL_IN_BACK = 0x20,
+ AUDIO_CHANNEL_IN_LEFT_PROCESSED = 0x40,
+ AUDIO_CHANNEL_IN_RIGHT_PROCESSED = 0x80,
+ AUDIO_CHANNEL_IN_FRONT_PROCESSED = 0x100,
+ AUDIO_CHANNEL_IN_BACK_PROCESSED = 0x200,
+ AUDIO_CHANNEL_IN_PRESSURE = 0x400,
+ AUDIO_CHANNEL_IN_X_AXIS = 0x800,
+ AUDIO_CHANNEL_IN_Y_AXIS = 0x1000,
+ AUDIO_CHANNEL_IN_Z_AXIS = 0x2000,
+ AUDIO_CHANNEL_IN_VOICE_UPLINK = 0x4000,
+ AUDIO_CHANNEL_IN_VOICE_DNLINK = 0x8000,
+
+ AUDIO_CHANNEL_IN_MONO = AUDIO_CHANNEL_IN_FRONT,
+ AUDIO_CHANNEL_IN_STEREO = (AUDIO_CHANNEL_IN_LEFT | AUDIO_CHANNEL_IN_RIGHT),
+ AUDIO_CHANNEL_IN_FRONT_BACK = (AUDIO_CHANNEL_IN_FRONT | AUDIO_CHANNEL_IN_BACK),
+ AUDIO_CHANNEL_IN_ALL = (AUDIO_CHANNEL_IN_LEFT |
+ AUDIO_CHANNEL_IN_RIGHT |
+ AUDIO_CHANNEL_IN_FRONT |
+ AUDIO_CHANNEL_IN_BACK|
+ AUDIO_CHANNEL_IN_LEFT_PROCESSED |
+ AUDIO_CHANNEL_IN_RIGHT_PROCESSED |
+ AUDIO_CHANNEL_IN_FRONT_PROCESSED |
+ AUDIO_CHANNEL_IN_BACK_PROCESSED|
+ AUDIO_CHANNEL_IN_PRESSURE |
+ AUDIO_CHANNEL_IN_X_AXIS |
+ AUDIO_CHANNEL_IN_Y_AXIS |
+ AUDIO_CHANNEL_IN_Z_AXIS |
+ AUDIO_CHANNEL_IN_VOICE_UPLINK |
+ AUDIO_CHANNEL_IN_VOICE_DNLINK),
+};
+
+/* A channel mask per se only defines the presence or absence of a channel, not the order.
+ * But see AUDIO_INTERLEAVE_* below for the platform convention of order.
+ */
+typedef uint32_t audio_channel_mask_t;
+
+/* Expresses the convention when stereo audio samples are stored interleaved
+ * in an array. This should improve readability by allowing code to use
+ * symbolic indices instead of hard-coded [0] and [1].
+ *
+ * For multi-channel beyond stereo, the platform convention is that channels
+ * are interleaved in order from least significant channel mask bit
+ * to most significant channel mask bit, with unused bits skipped.
+ * Any exceptions to this convention will be noted at the appropriate API.
+ */
+enum {
+ AUDIO_INTERLEAVE_LEFT = 0,
+ AUDIO_INTERLEAVE_RIGHT = 1,
+};
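+
+/* Illustrative sketch (assumed helper, not part of the upstream header): with
+ * the convention above, interleaved stereo samples can be addressed with
+ * symbolic indices instead of hard-coded 0 and 1. */
+static inline int16_t audio_example_right_sample(const int16_t *stereo_frame)
+{
+    /* stereo_frame points at one interleaved 16-bit PCM frame: left, then right */
+    return stereo_frame[AUDIO_INTERLEAVE_RIGHT];
+}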
+
+typedef enum {
+ AUDIO_MODE_INVALID = -2,
+ AUDIO_MODE_CURRENT = -1,
+ AUDIO_MODE_NORMAL = 0,
+ AUDIO_MODE_RINGTONE = 1,
+ AUDIO_MODE_IN_CALL = 2,
+ AUDIO_MODE_IN_COMMUNICATION = 3,
+
+ AUDIO_MODE_CNT,
+ AUDIO_MODE_MAX = AUDIO_MODE_CNT - 1,
+} audio_mode_t;
+
+/* This enum is deprecated */
+typedef enum {
+ AUDIO_IN_ACOUSTICS_NONE = 0,
+ AUDIO_IN_ACOUSTICS_AGC_ENABLE = 0x0001,
+ AUDIO_IN_ACOUSTICS_AGC_DISABLE = 0,
+ AUDIO_IN_ACOUSTICS_NS_ENABLE = 0x0002,
+ AUDIO_IN_ACOUSTICS_NS_DISABLE = 0,
+ AUDIO_IN_ACOUSTICS_TX_IIR_ENABLE = 0x0004,
+ AUDIO_IN_ACOUSTICS_TX_DISABLE = 0,
+} audio_in_acoustics_t;
+
+enum {
+ AUDIO_DEVICE_NONE = 0x0,
+ /* reserved bits */
+ AUDIO_DEVICE_BIT_IN = 0x80000000,
+ AUDIO_DEVICE_BIT_DEFAULT = 0x40000000,
+ /* output devices */
+ AUDIO_DEVICE_OUT_EARPIECE = 0x1,
+ AUDIO_DEVICE_OUT_SPEAKER = 0x2,
+ AUDIO_DEVICE_OUT_WIRED_HEADSET = 0x4,
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE = 0x8,
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO = 0x10,
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET = 0x20,
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT = 0x40,
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP = 0x80,
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES = 0x100,
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER = 0x200,
+ AUDIO_DEVICE_OUT_AUX_DIGITAL = 0x400,
+ AUDIO_DEVICE_OUT_HDMI = AUDIO_DEVICE_OUT_AUX_DIGITAL,
+ /* uses an analog connection (multiplexed over the USB connector pins for instance) */
+ AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET = 0x800,
+ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET = 0x1000,
+ /* USB accessory mode: your Android device is a USB device and the dock is a USB host */
+ AUDIO_DEVICE_OUT_USB_ACCESSORY = 0x2000,
+ /* USB host mode: your Android device is a USB host and the dock is a USB device */
+ AUDIO_DEVICE_OUT_USB_DEVICE = 0x4000,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX = 0x8000,
+ /* Telephony voice TX path */
+ AUDIO_DEVICE_OUT_TELEPHONY_TX = 0x10000,
+ /* Analog jack with line impedance detected */
+ AUDIO_DEVICE_OUT_LINE = 0x20000,
+ /* HDMI Audio Return Channel */
+ AUDIO_DEVICE_OUT_HDMI_ARC = 0x40000,
+ /* S/PDIF out */
+ AUDIO_DEVICE_OUT_SPDIF = 0x80000,
+ /* FM transmitter out */
+ AUDIO_DEVICE_OUT_FM = 0x100000,
+ AUDIO_DEVICE_OUT_DEFAULT = AUDIO_DEVICE_BIT_DEFAULT,
+ AUDIO_DEVICE_OUT_ALL = (AUDIO_DEVICE_OUT_EARPIECE |
+ AUDIO_DEVICE_OUT_SPEAKER |
+ AUDIO_DEVICE_OUT_WIRED_HEADSET |
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO |
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT |
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER |
+ AUDIO_DEVICE_OUT_HDMI |
+ AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET |
+ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET |
+ AUDIO_DEVICE_OUT_USB_ACCESSORY |
+ AUDIO_DEVICE_OUT_USB_DEVICE |
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX |
+ AUDIO_DEVICE_OUT_TELEPHONY_TX |
+ AUDIO_DEVICE_OUT_LINE |
+ AUDIO_DEVICE_OUT_HDMI_ARC |
+ AUDIO_DEVICE_OUT_SPDIF |
+ AUDIO_DEVICE_OUT_FM |
+ AUDIO_DEVICE_OUT_DEFAULT),
+ AUDIO_DEVICE_OUT_ALL_A2DP = (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+ AUDIO_DEVICE_OUT_ALL_SCO = (AUDIO_DEVICE_OUT_BLUETOOTH_SCO |
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET |
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+ AUDIO_DEVICE_OUT_ALL_USB = (AUDIO_DEVICE_OUT_USB_ACCESSORY |
+ AUDIO_DEVICE_OUT_USB_DEVICE),
+
+ /* input devices */
+ AUDIO_DEVICE_IN_COMMUNICATION = AUDIO_DEVICE_BIT_IN | 0x1,
+ AUDIO_DEVICE_IN_AMBIENT = AUDIO_DEVICE_BIT_IN | 0x2,
+ AUDIO_DEVICE_IN_BUILTIN_MIC = AUDIO_DEVICE_BIT_IN | 0x4,
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET = AUDIO_DEVICE_BIT_IN | 0x8,
+ AUDIO_DEVICE_IN_WIRED_HEADSET = AUDIO_DEVICE_BIT_IN | 0x10,
+ AUDIO_DEVICE_IN_AUX_DIGITAL = AUDIO_DEVICE_BIT_IN | 0x20,
+ AUDIO_DEVICE_IN_HDMI = AUDIO_DEVICE_IN_AUX_DIGITAL,
+ /* Telephony voice RX path */
+ AUDIO_DEVICE_IN_VOICE_CALL = AUDIO_DEVICE_BIT_IN | 0x40,
+ AUDIO_DEVICE_IN_TELEPHONY_RX = AUDIO_DEVICE_IN_VOICE_CALL,
+ AUDIO_DEVICE_IN_BACK_MIC = AUDIO_DEVICE_BIT_IN | 0x80,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX = AUDIO_DEVICE_BIT_IN | 0x100,
+ AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET = AUDIO_DEVICE_BIT_IN | 0x200,
+ AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET = AUDIO_DEVICE_BIT_IN | 0x400,
+ AUDIO_DEVICE_IN_USB_ACCESSORY = AUDIO_DEVICE_BIT_IN | 0x800,
+ AUDIO_DEVICE_IN_USB_DEVICE = AUDIO_DEVICE_BIT_IN | 0x1000,
+ /* FM tuner input */
+ AUDIO_DEVICE_IN_FM_TUNER = AUDIO_DEVICE_BIT_IN | 0x2000,
+ /* TV tuner input */
+ AUDIO_DEVICE_IN_TV_TUNER = AUDIO_DEVICE_BIT_IN | 0x4000,
+ /* Analog jack with line impedance detected */
+ AUDIO_DEVICE_IN_LINE = AUDIO_DEVICE_BIT_IN | 0x8000,
+ /* S/PDIF in */
+ AUDIO_DEVICE_IN_SPDIF = AUDIO_DEVICE_BIT_IN | 0x10000,
+ AUDIO_DEVICE_IN_BLUETOOTH_A2DP = AUDIO_DEVICE_BIT_IN | 0x20000,
+ AUDIO_DEVICE_IN_DEFAULT = AUDIO_DEVICE_BIT_IN | AUDIO_DEVICE_BIT_DEFAULT,
+
+ AUDIO_DEVICE_IN_ALL = (AUDIO_DEVICE_IN_COMMUNICATION |
+ AUDIO_DEVICE_IN_AMBIENT |
+ AUDIO_DEVICE_IN_BUILTIN_MIC |
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET |
+ AUDIO_DEVICE_IN_WIRED_HEADSET |
+ AUDIO_DEVICE_IN_HDMI |
+ AUDIO_DEVICE_IN_TELEPHONY_RX |
+ AUDIO_DEVICE_IN_BACK_MIC |
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX |
+ AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET |
+ AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET |
+ AUDIO_DEVICE_IN_USB_ACCESSORY |
+ AUDIO_DEVICE_IN_USB_DEVICE |
+ AUDIO_DEVICE_IN_FM_TUNER |
+ AUDIO_DEVICE_IN_TV_TUNER |
+ AUDIO_DEVICE_IN_LINE |
+ AUDIO_DEVICE_IN_SPDIF |
+ AUDIO_DEVICE_IN_BLUETOOTH_A2DP |
+ AUDIO_DEVICE_IN_DEFAULT),
+ AUDIO_DEVICE_IN_ALL_SCO = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET,
+ AUDIO_DEVICE_IN_ALL_USB = (AUDIO_DEVICE_IN_USB_ACCESSORY |
+ AUDIO_DEVICE_IN_USB_DEVICE),
+};
+
+typedef uint32_t audio_devices_t;
+
+/* the audio output flags serve two purposes:
+ * - when an AudioTrack is created they indicate a "wish" to be connected to an
+ * output stream with attributes corresponding to the specified flags
+ * - when present in an output profile descriptor listed for a particular audio
+ * hardware module, they indicate that an output stream can be opened that
+ * supports the attributes indicated by the flags.
+ * the audio policy manager will try to match the flags in the request
+ * (when getOutput() is called) to an available output stream.
+ */
+typedef enum {
+ AUDIO_OUTPUT_FLAG_NONE = 0x0, // no attributes
+ AUDIO_OUTPUT_FLAG_DIRECT = 0x1, // this output directly connects a track
+ // to one output stream: no software mixer
+ AUDIO_OUTPUT_FLAG_PRIMARY = 0x2, // this output is the primary output of
+ // the device. It is unique and must be
+ // present. It is opened by default and
+ // receives routing, audio mode and volume
+ // controls related to voice calls.
+ AUDIO_OUTPUT_FLAG_FAST = 0x4, // output supports "fast tracks",
+ // defined elsewhere
+ AUDIO_OUTPUT_FLAG_DEEP_BUFFER = 0x8, // use deep audio buffers
+ AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD = 0x10, // offload playback of compressed
+ // streams to hardware codec
+ AUDIO_OUTPUT_FLAG_NON_BLOCKING = 0x20 // use non-blocking write
+} audio_output_flags_t;
+
+/* The audio input flags are analogous to audio output flags.
+ * Currently they are used only when an AudioRecord is created,
+ * to indicate a preference to be connected to an input stream with
+ * attributes corresponding to the specified flags.
+ */
+typedef enum {
+ AUDIO_INPUT_FLAG_NONE = 0x0, // no attributes
+ AUDIO_INPUT_FLAG_FAST = 0x1, // prefer an input that supports "fast tracks"
+} audio_input_flags_t;
+
+/* Additional information about compressed streams offloaded to
+ * hardware playback
+ * The version and size fields must be initialized by the caller by using
+ * one of the constants defined here.
+ */
+typedef struct {
+ uint16_t version; // version of the info structure
+ uint16_t size; // total size of the structure including version and size
+ uint32_t sample_rate; // sample rate in Hz
+ audio_channel_mask_t channel_mask; // channel mask
+ audio_format_t format; // audio format
+ audio_stream_type_t stream_type; // stream type
+ uint32_t bit_rate; // bit rate in bits per second
+ int64_t duration_us; // duration in microseconds, -1 if unknown
+ bool has_video; // true if stream is tied to a video stream
+ bool is_streaming; // true if streaming, false if local playback
+} audio_offload_info_t;
+
+#define AUDIO_MAKE_OFFLOAD_INFO_VERSION(maj,min) \
+ ((((maj) & 0xff) << 8) | ((min) & 0xff))
+
+#define AUDIO_OFFLOAD_INFO_VERSION_0_1 AUDIO_MAKE_OFFLOAD_INFO_VERSION(0, 1)
+#define AUDIO_OFFLOAD_INFO_VERSION_CURRENT AUDIO_OFFLOAD_INFO_VERSION_0_1
+
+static const audio_offload_info_t AUDIO_INFO_INITIALIZER = {
+ version: AUDIO_OFFLOAD_INFO_VERSION_CURRENT,
+ size: sizeof(audio_offload_info_t),
+ sample_rate: 0,
+ channel_mask: 0,
+ format: AUDIO_FORMAT_DEFAULT,
+ stream_type: AUDIO_STREAM_VOICE_CALL,
+ bit_rate: 0,
+ duration_us: 0,
+ has_video: false,
+ is_streaming: false
+};
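+
+/* Illustrative sketch (assumed usage, not mandated by the header): callers are
+ * expected to start from AUDIO_INFO_INITIALIZER so that the version and size
+ * fields are filled in, then override only the fields they know. */
+static inline audio_offload_info_t audio_example_offload_info(uint32_t sample_rate,
+                                                              audio_format_t format)
+{
+    audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
+    info.sample_rate = sample_rate;               /* e.g. 44100 */
+    info.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+    info.format = format;                         /* e.g. AUDIO_FORMAT_MP3 */
+    info.stream_type = AUDIO_STREAM_MUSIC;
+    return info;
+}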
+
+
+/* audio hw module handle functions or structures referencing a module */
+typedef int audio_module_handle_t;
+
+/******************************
+ * Volume control
+ *****************************/
+
+/* If the audio hardware supports gain control on some audio paths,
+ * the platform can expose them in the audio_policy.conf file. The audio HAL
+ * will then implement gain control functions that will use the following data
+ * structures. */
+
+/* Type of gain control exposed by an audio port */
+#define AUDIO_GAIN_MODE_JOINT 0x1 /* supports joint channel gain control */
+#define AUDIO_GAIN_MODE_CHANNELS 0x2 /* supports separate channel gain control */
+#define AUDIO_GAIN_MODE_RAMP 0x4 /* supports gain ramps */
+
+typedef uint32_t audio_gain_mode_t;
+
+
+/* An audio_gain struct is a representation of a gain stage.
+ * A gain stage is always attached to an audio port. */
+struct audio_gain {
+ audio_gain_mode_t mode; /* e.g. AUDIO_GAIN_MODE_JOINT */
+ audio_channel_mask_t channel_mask; /* channels whose gain can be controlled.
+ N/A if AUDIO_GAIN_MODE_CHANNELS is not supported */
+ int min_value; /* minimum gain value in millibels */
+ int max_value; /* maximum gain value in millibels */
+ int default_value; /* default gain value in millibels */
+ unsigned int step_value; /* gain step in millibels */
+ unsigned int min_ramp_ms; /* minimum ramp duration in ms */
+ unsigned int max_ramp_ms; /* maximum ramp duration in ms */
+};
+
+/* The gain configuration structure is used to get or set the gain values of a
+ * given port */
+struct audio_gain_config {
+ int index; /* index of the corresponding audio_gain in the
+ audio_port gains[] table */
+ audio_gain_mode_t mode; /* mode requested for this command */
+ audio_channel_mask_t channel_mask; /* channels to which the gain values apply.
+ N/A in joint mode */
+ int values[sizeof(audio_channel_mask_t)]; /* gain values in millibels for each
+ channel ordered from LSb to MSb in channel mask.
+ The number of values is 1 in joint mode or
+ popcount(channel_mask) */
+ unsigned int ramp_duration_ms; /* ramp duration in ms */
+};
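+
+/* Illustrative sketch (assumed usage, not part of the upstream API): a
+ * joint-mode gain request carries a single value and no channel mask. */
+static inline struct audio_gain_config audio_example_joint_gain(int index,
+                                                                int gain_mb)
+{
+    struct audio_gain_config cfg = { 0 };
+    cfg.index = index;                      /* entry in the port's gains[] table */
+    cfg.mode = AUDIO_GAIN_MODE_JOINT;       /* one value applied to all channels */
+    cfg.channel_mask = AUDIO_CHANNEL_NONE;  /* N/A in joint mode */
+    cfg.values[0] = gain_mb;                /* gain in millibels */
+    cfg.ramp_duration_ms = 0;               /* apply immediately, no ramp */
+    return cfg;
+}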
+
+/******************************
+ * Routing control
+ *****************************/
+
+/* Types defined here are used to describe an audio source or sink at internal
+ * framework interfaces (audio policy, patch panel) or at the audio HAL.
+ * Sinks and sources are grouped under the concept of an "audio port" representing an
+ * audio end point at the edge of the system managed by the module exposing
+ * the interface. */
+
+/* Audio port role: either source or sink */
+typedef enum {
+ AUDIO_PORT_ROLE_NONE,
+ AUDIO_PORT_ROLE_SOURCE,
+ AUDIO_PORT_ROLE_SINK,
+} audio_port_role_t;
+
+/* Audio port type indicates if it is a session (e.g AudioTrack),
+ * a mix (e.g PlaybackThread output) or a physical device
+ * (e.g AUDIO_DEVICE_OUT_SPEAKER) */
+typedef enum {
+ AUDIO_PORT_TYPE_NONE,
+ AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_PORT_TYPE_MIX,
+ AUDIO_PORT_TYPE_SESSION,
+} audio_port_type_t;
+
+/* Each port has a unique ID or handle allocated by policy manager */
+typedef int audio_port_handle_t;
+#define AUDIO_PORT_HANDLE_NONE 0
+
+
+/* maximum audio device address length */
+#define AUDIO_DEVICE_MAX_ADDRESS_LEN 32
+
+/* extension for audio port configuration structure when the audio port is a
+ * hardware device */
+struct audio_port_config_device_ext {
+ audio_module_handle_t hw_module; /* module the device is attached to */
+ audio_devices_t type; /* device type (e.g AUDIO_DEVICE_OUT_SPEAKER) */
+ char address[AUDIO_DEVICE_MAX_ADDRESS_LEN]; /* device address. "" if N/A */
+};
+
+/* extension for audio port configuration structure when the audio port is a
+ * sub mix */
+struct audio_port_config_mix_ext {
+ audio_module_handle_t hw_module; /* module the stream is attached to */
+ audio_io_handle_t handle; /* I/O handle of the input/output stream */
+ union {
+ //TODO: change use case for output streams: use strategy and mixer attributes
+ audio_stream_type_t stream;
+ audio_source_t source;
+ } usecase;
+};
+
+/* extension for audio port configuration structure when the audio port is an
+ * audio session */
+struct audio_port_config_session_ext {
+ audio_session_t session; /* audio session */
+};
+
+/* Flags indicating which fields are to be considered in struct audio_port_config */
+#define AUDIO_PORT_CONFIG_SAMPLE_RATE 0x1
+#define AUDIO_PORT_CONFIG_CHANNEL_MASK 0x2
+#define AUDIO_PORT_CONFIG_FORMAT 0x4
+#define AUDIO_PORT_CONFIG_GAIN 0x8
+#define AUDIO_PORT_CONFIG_ALL (AUDIO_PORT_CONFIG_SAMPLE_RATE | \
+ AUDIO_PORT_CONFIG_CHANNEL_MASK | \
+ AUDIO_PORT_CONFIG_FORMAT | \
+ AUDIO_PORT_CONFIG_GAIN)
+
+/* audio port configuration structure used to specify a particular configuration of
+ * an audio port */
+struct audio_port_config {
+ audio_port_handle_t id; /* port unique ID */
+ audio_port_role_t role; /* sink or source */
+ audio_port_type_t type; /* device, mix ... */
+ unsigned int config_mask; /* e.g AUDIO_PORT_CONFIG_ALL */
+ unsigned int sample_rate; /* sampling rate in Hz */
+ audio_channel_mask_t channel_mask; /* channel mask if applicable */
+ audio_format_t format; /* format if applicable */
+ struct audio_gain_config gain; /* gain to apply if applicable */
+ union {
+ struct audio_port_config_device_ext device; /* device specific info */
+ struct audio_port_config_mix_ext mix; /* mix specific info */
+ struct audio_port_config_session_ext session; /* session specific info */
+ } ext;
+};
+
+
+/* max number of sampling rates in audio port */
+#define AUDIO_PORT_MAX_SAMPLING_RATES 16
+/* max number of channel masks in audio port */
+#define AUDIO_PORT_MAX_CHANNEL_MASKS 16
+/* max number of audio formats in audio port */
+#define AUDIO_PORT_MAX_FORMATS 16
+/* max number of gain controls in audio port */
+#define AUDIO_PORT_MAX_GAINS 16
+
+/* extension for audio port structure when the audio port is a hardware device */
+struct audio_port_device_ext {
+ audio_module_handle_t hw_module; /* module the device is attached to */
+ audio_devices_t type; /* device type (e.g AUDIO_DEVICE_OUT_SPEAKER) */
+ char address[AUDIO_DEVICE_MAX_ADDRESS_LEN];
+};
+
+/* Latency class of the audio mix */
+typedef enum {
+ AUDIO_LATENCY_LOW,
+ AUDIO_LATENCY_NORMAL,
+} audio_mix_latency_class_t;
+
+/* extension for audio port structure when the audio port is a sub mix */
+struct audio_port_mix_ext {
+ audio_module_handle_t hw_module; /* module the stream is attached to */
+ audio_io_handle_t handle; /* I/O handle of the input/output stream */
+ audio_mix_latency_class_t latency_class; /* latency class */
+ // other attributes: routing strategies
+};
+
+/* extension for audio port structure when the audio port is an audio session */
+struct audio_port_session_ext {
+ audio_session_t session; /* audio session */
+};
+
+
+struct audio_port {
+ audio_port_handle_t id; /* port unique ID */
+ audio_port_role_t role; /* sink or source */
+ audio_port_type_t type; /* device, mix ... */
+ unsigned int num_sample_rates; /* number of sampling rates in following array */
+ unsigned int sample_rates[AUDIO_PORT_MAX_SAMPLING_RATES];
+ unsigned int num_channel_masks; /* number of channel masks in following array */
+ audio_channel_mask_t channel_masks[AUDIO_PORT_MAX_CHANNEL_MASKS];
+ unsigned int num_formats; /* number of formats in following array */
+ audio_format_t formats[AUDIO_PORT_MAX_FORMATS];
+ unsigned int num_gains; /* number of gains in following array */
+ struct audio_gain gains[AUDIO_PORT_MAX_GAINS];
+ struct audio_port_config active_config; /* current audio port configuration */
+ union {
+ struct audio_port_device_ext device;
+ struct audio_port_mix_ext mix;
+ struct audio_port_session_ext session;
+ } ext;
+};
+
+/* An audio patch represents a connection between one or more source ports and
+ * one or more sink ports. Patches are connected and disconnected by audio policy manager or by
+ * applications via framework APIs.
+ * Each patch is identified by a handle at the interface used to create that patch. For instance,
+ * when a patch is created by the audio HAL, the HAL allocates and returns a handle.
+ * This handle is unique to a given audio HAL hardware module.
+ * But the same patch also receives a system-wide unique handle allocated by the framework.
+ * This unique handle is used for all transactions inside the framework.
+ */
+typedef int audio_patch_handle_t;
+#define AUDIO_PATCH_HANDLE_NONE 0
+
+#define AUDIO_PATCH_PORTS_MAX 16
+
+struct audio_patch {
+ audio_patch_handle_t id; /* patch unique ID */
+ unsigned int num_sources; /* number of sources in following array */
+ struct audio_port_config sources[AUDIO_PATCH_PORTS_MAX];
+ unsigned int num_sinks; /* number of sinks in following array */
+ struct audio_port_config sinks[AUDIO_PATCH_PORTS_MAX];
+};
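+
+/* Illustrative sketch (assumed usage, not part of the upstream API): a minimal
+ * one-source, one-sink device-to-device patch, e.g. FM tuner input routed to
+ * the speaker. Only the fields relevant to the example are filled in. */
+static inline void audio_example_init_device_patch(struct audio_patch *patch,
+                                                   audio_devices_t src_device,
+                                                   audio_devices_t sink_device)
+{
+    patch->id = AUDIO_PATCH_HANDLE_NONE;             /* real handle is allocated later */
+    patch->num_sources = 1;
+    patch->sources[0].type = AUDIO_PORT_TYPE_DEVICE;
+    patch->sources[0].role = AUDIO_PORT_ROLE_SOURCE;
+    patch->sources[0].ext.device.type = src_device;
+    patch->num_sinks = 1;
+    patch->sinks[0].type = AUDIO_PORT_TYPE_DEVICE;
+    patch->sinks[0].role = AUDIO_PORT_ROLE_SINK;
+    patch->sinks[0].ext.device.type = sink_device;
+}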
+
+
+static inline bool audio_is_output_device(audio_devices_t device)
+{
+ if (((device & AUDIO_DEVICE_BIT_IN) == 0) &&
+ (popcount(device) == 1) && ((device & ~AUDIO_DEVICE_OUT_ALL) == 0))
+ return true;
+ else
+ return false;
+}
+
+static inline bool audio_is_input_device(audio_devices_t device)
+{
+ if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
+ device &= ~AUDIO_DEVICE_BIT_IN;
+ if ((popcount(device) == 1) && ((device & ~AUDIO_DEVICE_IN_ALL) == 0))
+ return true;
+ }
+ return false;
+}
+
+static inline bool audio_is_output_devices(audio_devices_t device)
+{
+ return (device & AUDIO_DEVICE_BIT_IN) == 0;
+}
+
+static inline bool audio_is_a2dp_in_device(audio_devices_t device)
+{
+ if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
+ device &= ~AUDIO_DEVICE_BIT_IN;
+ if ((popcount(device) == 1) && (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP))
+ return true;
+ }
+ return false;
+}
+
+static inline bool audio_is_a2dp_out_device(audio_devices_t device)
+{
+ if ((popcount(device) == 1) && (device & AUDIO_DEVICE_OUT_ALL_A2DP))
+ return true;
+ else
+ return false;
+}
+
+// Deprecated - use audio_is_a2dp_out_device() instead
+static inline bool audio_is_a2dp_device(audio_devices_t device)
+{
+ return audio_is_a2dp_out_device(device);
+}
+
+static inline bool audio_is_bluetooth_sco_device(audio_devices_t device)
+{
+ if ((device & AUDIO_DEVICE_BIT_IN) == 0) {
+ if ((popcount(device) == 1) && ((device & ~AUDIO_DEVICE_OUT_ALL_SCO) == 0))
+ return true;
+ } else {
+ device &= ~AUDIO_DEVICE_BIT_IN;
+ if ((popcount(device) == 1) && ((device & ~AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) == 0))
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool audio_is_usb_out_device(audio_devices_t device)
+{
+ return ((popcount(device) == 1) && (device & AUDIO_DEVICE_OUT_ALL_USB));
+}
+
+static inline bool audio_is_usb_in_device(audio_devices_t device)
+{
+ if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
+ device &= ~AUDIO_DEVICE_BIT_IN;
+ if (popcount(device) == 1 && (device & AUDIO_DEVICE_IN_ALL_USB) != 0)
+ return true;
+ }
+ return false;
+}
+
+/* OBSOLETE - use audio_is_usb_out_device() instead. */
+static inline bool audio_is_usb_device(audio_devices_t device)
+{
+ return audio_is_usb_out_device(device);
+}
+
+static inline bool audio_is_remote_submix_device(audio_devices_t device)
+{
+ if ((device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX) == AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+ || (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX) == AUDIO_DEVICE_IN_REMOTE_SUBMIX)
+ return true;
+ else
+ return false;
+}
+
+static inline bool audio_is_input_channel(audio_channel_mask_t channel)
+{
+ if ((channel & ~AUDIO_CHANNEL_IN_ALL) == 0)
+ return channel != 0;
+ else
+ return false;
+}
+
+static inline bool audio_is_output_channel(audio_channel_mask_t channel)
+{
+ if ((channel & ~AUDIO_CHANNEL_OUT_ALL) == 0)
+ return channel != 0;
+ else
+ return false;
+}
+
+/* Returns the number of channels from an input channel mask,
+ * used in the context of audio input or recording.
+ */
+static inline uint32_t audio_channel_count_from_in_mask(audio_channel_mask_t channel)
+{
+ return popcount(channel & AUDIO_CHANNEL_IN_ALL);
+}
+
+/* Returns the number of channels from an output channel mask,
+ * used in the context of audio output or playback.
+ */
+static inline uint32_t audio_channel_count_from_out_mask(audio_channel_mask_t channel)
+{
+ return popcount(channel & AUDIO_CHANNEL_OUT_ALL);
+}
+
+/* Derive an output channel mask from a channel count.
+ * This is to be used when the content channel mask is unknown. The 1, 2, 4, 5, 6, 7 and 8 channel
+ * cases are mapped to the standard game/home-theater layouts, but note that 4 is mapped to quad,
+ * and not stereo + FC + mono surround. A channel count of 3 is arbitrarily mapped to stereo + FC
+ * for continuity with stereo.
+ * Returns the matching channel mask, or 0 if the number of channels exceeds that of the
+ * configurations for which a default channel mask is defined.
+ */
+static inline audio_channel_mask_t audio_channel_out_mask_from_count(uint32_t channel_count)
+{
+ switch (channel_count) {
+ case 1:
+ return AUDIO_CHANNEL_OUT_MONO;
+ case 2:
+ return AUDIO_CHANNEL_OUT_STEREO;
+ case 3:
+ return (AUDIO_CHANNEL_OUT_STEREO | AUDIO_CHANNEL_OUT_FRONT_CENTER);
+ case 4: // 4.0
+ return AUDIO_CHANNEL_OUT_QUAD;
+ case 5: // 5.0
+ return (AUDIO_CHANNEL_OUT_QUAD | AUDIO_CHANNEL_OUT_FRONT_CENTER);
+ case 6: // 5.1
+ return AUDIO_CHANNEL_OUT_5POINT1;
+ case 7: // 6.1
+ return (AUDIO_CHANNEL_OUT_5POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER);
+ case 8:
+ return AUDIO_CHANNEL_OUT_7POINT1;
+ default:
+ return 0;
+ }
+}
+
+/* Similar to above, but for input. Currently handles only mono and stereo. */
+static inline audio_channel_mask_t audio_channel_in_mask_from_count(uint32_t channel_count)
+{
+ switch (channel_count) {
+ case 1:
+ return AUDIO_CHANNEL_IN_MONO;
+ case 2:
+ return AUDIO_CHANNEL_IN_STEREO;
+ default:
+ return 0;
+ }
+}
+
+static inline bool audio_is_valid_format(audio_format_t format)
+{
+ switch (format & AUDIO_FORMAT_MAIN_MASK) {
+ case AUDIO_FORMAT_PCM:
+ switch (format) {
+ case AUDIO_FORMAT_PCM_16_BIT:
+ case AUDIO_FORMAT_PCM_8_BIT:
+ case AUDIO_FORMAT_PCM_32_BIT:
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ case AUDIO_FORMAT_PCM_FLOAT:
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ return true;
+ default:
+ return false;
+ }
+ /* not reached */
+ case AUDIO_FORMAT_MP3:
+ case AUDIO_FORMAT_AMR_NB:
+ case AUDIO_FORMAT_AMR_WB:
+ case AUDIO_FORMAT_AAC:
+ case AUDIO_FORMAT_HE_AAC_V1:
+ case AUDIO_FORMAT_HE_AAC_V2:
+ case AUDIO_FORMAT_VORBIS:
+ case AUDIO_FORMAT_OPUS:
+ case AUDIO_FORMAT_AC3:
+ case AUDIO_FORMAT_E_AC3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool audio_is_linear_pcm(audio_format_t format)
+{
+ return ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM);
+}
+
+static inline size_t audio_bytes_per_sample(audio_format_t format)
+{
+ size_t size = 0;
+
+ switch (format) {
+ case AUDIO_FORMAT_PCM_32_BIT:
+ case AUDIO_FORMAT_PCM_8_24_BIT:
+ size = sizeof(int32_t);
+ break;
+ case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+ size = sizeof(uint8_t) * 3;
+ break;
+ case AUDIO_FORMAT_PCM_16_BIT:
+ size = sizeof(int16_t);
+ break;
+ case AUDIO_FORMAT_PCM_8_BIT:
+ size = sizeof(uint8_t);
+ break;
+ case AUDIO_FORMAT_PCM_FLOAT:
+ size = sizeof(float);
+ break;
+ default:
+ break;
+ }
+ return size;
+}
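+
+/* Illustrative sketch (assumed helper, not part of the upstream header): the
+ * size of one frame is bytes-per-sample times the channel count, e.g. 16-bit
+ * stereo PCM gives 2 * 2 = 4 bytes per frame. */
+static inline size_t audio_example_bytes_per_frame(audio_format_t format,
+                                                   audio_channel_mask_t channel_mask)
+{
+    if (!audio_is_linear_pcm(format))       /* only meaningful for linear PCM */
+        return 0;
+    return audio_bytes_per_sample(format) *
+           audio_channel_count_from_out_mask(channel_mask);
+}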
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // ANDROID_AUDIO_CORE_H
diff --git a/package/utils/adbd/src/include/system/audio_policy.h b/package/utils/adbd/src/include/system/audio_policy.h
new file mode 100644
index 0000000..f805cdc
--- /dev/null
+++ b/package/utils/adbd/src/include/system/audio_policy.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_AUDIO_POLICY_CORE_H
+#define ANDROID_AUDIO_POLICY_CORE_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <cutils/bitops.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* The enums were moved here mostly from
+ * frameworks/base/include/media/AudioSystem.h
+ */
+
+/* device categories used for audio_policy->set_force_use() */
+typedef enum {
+ AUDIO_POLICY_FORCE_NONE,
+ AUDIO_POLICY_FORCE_SPEAKER,
+ AUDIO_POLICY_FORCE_HEADPHONES,
+ AUDIO_POLICY_FORCE_BT_SCO,
+ AUDIO_POLICY_FORCE_BT_A2DP,
+ AUDIO_POLICY_FORCE_WIRED_ACCESSORY,
+ AUDIO_POLICY_FORCE_BT_CAR_DOCK,
+ AUDIO_POLICY_FORCE_BT_DESK_DOCK,
+ AUDIO_POLICY_FORCE_ANALOG_DOCK,
+ AUDIO_POLICY_FORCE_DIGITAL_DOCK,
+ AUDIO_POLICY_FORCE_NO_BT_A2DP, /* A2DP sink is not preferred to speaker or wired HS */
+ AUDIO_POLICY_FORCE_SYSTEM_ENFORCED,
+
+ AUDIO_POLICY_FORCE_CFG_CNT,
+ AUDIO_POLICY_FORCE_CFG_MAX = AUDIO_POLICY_FORCE_CFG_CNT - 1,
+
+ AUDIO_POLICY_FORCE_DEFAULT = AUDIO_POLICY_FORCE_NONE,
+} audio_policy_forced_cfg_t;
+
+/* usages used for audio_policy->set_force_use() */
+typedef enum {
+ AUDIO_POLICY_FORCE_FOR_COMMUNICATION,
+ AUDIO_POLICY_FORCE_FOR_MEDIA,
+ AUDIO_POLICY_FORCE_FOR_RECORD,
+ AUDIO_POLICY_FORCE_FOR_DOCK,
+ AUDIO_POLICY_FORCE_FOR_SYSTEM,
+
+ AUDIO_POLICY_FORCE_USE_CNT,
+ AUDIO_POLICY_FORCE_USE_MAX = AUDIO_POLICY_FORCE_USE_CNT - 1,
+} audio_policy_force_use_t;
+
+/* device connection states used for audio_policy->set_device_connection_state()
+ */
+typedef enum {
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+
+ AUDIO_POLICY_DEVICE_STATE_CNT,
+ AUDIO_POLICY_DEVICE_STATE_MAX = AUDIO_POLICY_DEVICE_STATE_CNT - 1,
+} audio_policy_dev_state_t;
+
+typedef enum {
+ /* Used to generate a tone to notify the user of a
+ * notification/alarm/ringtone while they are in a call. */
+ AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION = 0,
+
+ AUDIO_POLICY_TONE_CNT,
+ AUDIO_POLICY_TONE_MAX = AUDIO_POLICY_TONE_CNT - 1,
+} audio_policy_tone_t;
+
+
+static inline bool audio_is_low_visibility(audio_stream_type_t stream)
+{
+ switch (stream) {
+ case AUDIO_STREAM_SYSTEM:
+ case AUDIO_STREAM_NOTIFICATION:
+ case AUDIO_STREAM_RING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // ANDROID_AUDIO_POLICY_CORE_H
diff --git a/package/utils/adbd/src/include/system/camera.h b/package/utils/adbd/src/include/system/camera.h
new file mode 100644
index 0000000..c676201
--- /dev/null
+++ b/package/utils/adbd/src/include/system/camera.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H
+#define SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <cutils/native_handle.h>
+#include <hardware/hardware.h>
+#include <hardware/gralloc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * A set of bit masks for specifying how the received preview frames are
+ * handled before the previewCallback() call.
+ *
+ * The least significant 3 bits of an "int" value are used for this purpose:
+ *
+ * ..... 0 0 0
+ * ^ ^ ^
+ * | | |---------> determine whether the callback is enabled or not
+ * | |-----------> determine whether the callback is one-shot or not
+ * |-------------> determine whether the frame is copied out or not
+ *
+ * WARNING: When a frame is sent directly without copying, it is the frame
+ * receiver's responsibility to make sure that the frame data won't get
+ * corrupted by subsequent preview frames filled by the camera. This flag is
+ * recommended only when copying out data brings significant performance price
+ * and the handling/processing of the received frame data is always faster than
+ * the preview frame rate so that data corruption won't occur.
+ *
+ * For instance,
+ * 1. 0x00 disables the callback. In this case, copy out and one shot bits
+ * are ignored.
+ * 2. 0x01 enables a callback without copying out the received frames. A
+ * typical use case is the Camcorder application to avoid making costly
+ * frame copies.
+ * 3. 0x05 enables a callback with the frame copied out repeatedly. A typical
+ * use case is the Camera application.
+ * 4. 0x07 enables a callback with the frame copied out only once. A typical
+ * use case is the Barcode scanner application.
+ */
+
+enum {
+ CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK = 0x01,
+ CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK = 0x02,
+ CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK = 0x04,
+ /** Typical use cases */
+ CAMERA_FRAME_CALLBACK_FLAG_NOOP = 0x00,
+ CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER = 0x01,
+ CAMERA_FRAME_CALLBACK_FLAG_CAMERA = 0x05,
+ CAMERA_FRAME_CALLBACK_FLAG_BARCODE_SCANNER = 0x07
+};
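+
+/* Illustrative sketch (assumed helper, not part of the upstream header): how
+ * the three low-order flag bits above are meant to be tested. */
+static inline int camera_example_frame_is_copied_out(int flags)
+{
+    /* true only when the callback is enabled and the copy-out bit is set */
+    return (flags & CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) &&
+           (flags & CAMERA_FRAME_CALLBACK_FLAG_COPY_OUT_MASK);
+}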
+
+/** msgType in notifyCallback and dataCallback functions */
+enum {
+ CAMERA_MSG_ERROR = 0x0001, // notifyCallback
+ CAMERA_MSG_SHUTTER = 0x0002, // notifyCallback
+ CAMERA_MSG_FOCUS = 0x0004, // notifyCallback
+ CAMERA_MSG_ZOOM = 0x0008, // notifyCallback
+ CAMERA_MSG_PREVIEW_FRAME = 0x0010, // dataCallback
+ CAMERA_MSG_VIDEO_FRAME = 0x0020, // data_timestamp_callback
+ CAMERA_MSG_POSTVIEW_FRAME = 0x0040, // dataCallback
+ CAMERA_MSG_RAW_IMAGE = 0x0080, // dataCallback
+ CAMERA_MSG_COMPRESSED_IMAGE = 0x0100, // dataCallback
+ CAMERA_MSG_RAW_IMAGE_NOTIFY = 0x0200, // dataCallback
+ // Preview frame metadata. This can be combined with
+ // CAMERA_MSG_PREVIEW_FRAME in dataCallback. For example, the apps can
+ // request FRAME and METADATA. Or the apps can request only FRAME or only
+ // METADATA.
+ CAMERA_MSG_PREVIEW_METADATA = 0x0400, // dataCallback
+ // Notify on autofocus start and stop. This is useful in continuous
+ // autofocus - FOCUS_MODE_CONTINUOUS_VIDEO and FOCUS_MODE_CONTINUOUS_PICTURE.
+ CAMERA_MSG_FOCUS_MOVE = 0x0800, // notifyCallback
+ CAMERA_MSG_ALL_MSGS = 0xFFFF
+};
+
+/** cmdType in sendCommand functions */
+enum {
+ CAMERA_CMD_START_SMOOTH_ZOOM = 1,
+ CAMERA_CMD_STOP_SMOOTH_ZOOM = 2,
+
+ /**
+ * Set the clockwise rotation of preview display (setPreviewDisplay) in
+ * degrees. This affects the preview frames and the picture displayed after
+ * snapshot. This method is useful for portrait mode applications. Note
+ * that preview display of front-facing cameras is flipped horizontally
+ * before the rotation, that is, the image is reflected along the central
+ * vertical axis of the camera sensor, so users see themselves as if
+ * looking into a mirror.
+ *
+ * This does not affect the order of byte array of
+ * CAMERA_MSG_PREVIEW_FRAME, CAMERA_MSG_VIDEO_FRAME,
+ * CAMERA_MSG_POSTVIEW_FRAME, CAMERA_MSG_RAW_IMAGE, or
+ * CAMERA_MSG_COMPRESSED_IMAGE. This is allowed to be set during preview
+ * since API level 14.
+ */
+ CAMERA_CMD_SET_DISPLAY_ORIENTATION = 3,
+
+ /**
+ * cmdType to disable/enable shutter sound. In sendCommand passing arg1 =
+ * 0 will disable, while passing arg1 = 1 will enable the shutter sound.
+ */
+ CAMERA_CMD_ENABLE_SHUTTER_SOUND = 4,
+
+ /* cmdType to play recording sound */
+ CAMERA_CMD_PLAY_RECORDING_SOUND = 5,
+
+ /**
+ * Start the face detection. This should be called after preview is started.
+ * The camera will notify the listener of CAMERA_MSG_FACE and the detected
+ * faces in the preview frame. The detected faces may be the same as the
+ * previous ones. Apps should call CAMERA_CMD_STOP_FACE_DETECTION to stop
+ * the face detection. This method is supported if CameraParameters
+ * KEY_MAX_NUM_HW_DETECTED_FACES or KEY_MAX_NUM_SW_DETECTED_FACES is
+ * bigger than 0. Hardware and software face detection should not be running
+ * at the same time. If the face detection has started, apps should not send
+ * this again.
+ *
+ * In hardware face detection mode, CameraParameters KEY_WHITE_BALANCE,
+ * KEY_FOCUS_AREAS and KEY_METERING_AREAS have no effect.
+ *
+ * arg1 is the face detection type. It can be CAMERA_FACE_DETECTION_HW or
+ * CAMERA_FACE_DETECTION_SW. If the type of face detection requested is not
+ * supported, the HAL must return BAD_VALUE.
+ */
+ CAMERA_CMD_START_FACE_DETECTION = 6,
+
+ /**
+ * Stop the face detection.
+ */
+ CAMERA_CMD_STOP_FACE_DETECTION = 7,
+
+ /**
+ * Enable/disable focus move callback (CAMERA_MSG_FOCUS_MOVE). Passing
+ * arg1 = 0 will disable, while passing arg1 = 1 will enable the callback.
+ */
+ CAMERA_CMD_ENABLE_FOCUS_MOVE_MSG = 8,
+
+ /**
+ * Ping camera service to see if camera hardware is released.
+ *
+ * When any camera method returns error, the client can use ping command
+ * to see if the camera has been taken away by other clients. If the result
+ * is NO_ERROR, it means the camera hardware is not released. If the result
+ * is not NO_ERROR, the camera has been released and the existing client
+ * can silently finish itself or show a dialog.
+ */
+ CAMERA_CMD_PING = 9,
+
+ /**
+ * Configure the number of video buffers used for recording. The intended
+ * video buffer count for recording is passed as arg1, which must be
+ * greater than 0. This command must be sent before recording is started.
+ * This command returns INVALID_OPERATION error if it is sent after video
+ * recording is started, or the command is not supported at all. This
+ * command also returns a BAD_VALUE error if the intended video buffer
+ * count is non-positive or too big to be realized.
+ */
+ CAMERA_CMD_SET_VIDEO_BUFFER_COUNT = 10,
+};
+
+/** camera fatal errors */
+enum {
+ CAMERA_ERROR_UNKNOWN = 1,
+ /**
+ * Camera was released because another client has connected to the camera.
+ * The original client should call Camera::disconnect immediately after
+ * getting this notification. Otherwise, the camera will be released by
+ * camera service in a short time. The client should not call any method
+ * (except disconnect and sending CAMERA_CMD_PING) after getting this.
+ */
+ CAMERA_ERROR_RELEASED = 2,
+ CAMERA_ERROR_SERVER_DIED = 100
+};
+
+enum {
+ /** The facing of the camera is opposite to that of the screen. */
+ CAMERA_FACING_BACK = 0,
+ /** The facing of the camera is the same as that of the screen. */
+ CAMERA_FACING_FRONT = 1
+};
+
+enum {
+ /** Hardware face detection. It does not use much CPU. */
+ CAMERA_FACE_DETECTION_HW = 0,
+ /**
+ * Software face detection. It uses some CPU. Applications must use
+ * Camera.setPreviewTexture for preview in this mode.
+ */
+ CAMERA_FACE_DETECTION_SW = 1
+};
+
+/**
+ * The information of a face from camera face detection.
+ */
+typedef struct camera_face {
+ /**
+ * Bounds of the face [left, top, right, bottom]. (-1000, -1000) represents
+ * the top-left of the camera field of view, and (1000, 1000) represents the
+ * bottom-right of the field of view. The width and height cannot be 0 or
+ * negative. This is supported by both hardware and software face detection.
+ *
+ * The direction is relative to the sensor orientation, that is, what the
+ * sensor sees. The direction is not affected by the rotation or mirroring
+ * of CAMERA_CMD_SET_DISPLAY_ORIENTATION.
+ */
+ int32_t rect[4];
+
+ /**
+ * The confidence level of the face. The range is 1 to 100. 100 is the
+ * highest confidence. This is supported by both hardware and software
+ * face detection.
+ */
+ int32_t score;
+
+ /**
+ * A unique id per face while the face is visible to the tracker. If
+ * the face leaves the field-of-view and comes back, it will get a new
+ * id. If the value is 0, id is not supported.
+ */
+ int32_t id;
+
+ /**
+ * The coordinates of the center of the left eye. The range is -1000 to
+ * 1000. -2000, -2000 if this is not supported.
+ */
+ int32_t left_eye[2];
+
+ /**
+ * The coordinates of the center of the right eye. The range is -1000 to
+ * 1000. -2000, -2000 if this is not supported.
+ */
+ int32_t right_eye[2];
+
+ /**
+ * The coordinates of the center of the mouth. The range is -1000 to 1000.
+ * -2000, -2000 if this is not supported.
+ */
+ int32_t mouth[2];
+
+} camera_face_t;
+
+/**
+ * The metadata of the frame data.
+ */
+typedef struct camera_frame_metadata {
+ /**
+ * The number of detected faces in the frame.
+ */
+ int32_t number_of_faces;
+
+ /**
+ * An array of the detected faces. The length is number_of_faces.
+ */
+ camera_face_t *faces;
+} camera_frame_metadata_t;
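+
+/* Illustrative sketch (assumed usage, not part of the upstream header): walking
+ * the faces delivered alongside CAMERA_MSG_PREVIEW_METADATA. */
+static inline int32_t camera_example_count_confident_faces(
+        const camera_frame_metadata_t *metadata, int32_t min_score)
+{
+    int32_t count = 0;
+    int32_t i;
+    for (i = 0; i < metadata->number_of_faces; i++) {
+        if (metadata->faces[i].score >= min_score)  /* score ranges 1..100 */
+            count++;
+    }
+    return count;
+}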
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* SYSTEM_CORE_INCLUDE_ANDROID_CAMERA_H */
diff --git a/package/utils/adbd/src/include/system/graphics.h b/package/utils/adbd/src/include/system/graphics.h
new file mode 100644
index 0000000..74d790f
--- /dev/null
+++ b/package/utils/adbd/src/include/system/graphics.h
@@ -0,0 +1,509 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H
+#define SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * If the HAL needs to create service threads to handle graphics related
+ * tasks, these threads need to run at HAL_PRIORITY_URGENT_DISPLAY priority
+ * if they can block the main rendering thread in any way.
+ *
+ * the priority of the current thread can be set with:
+ *
+ * #include <sys/resource.h>
+ * setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY);
+ *
+ */
+
+#define HAL_PRIORITY_URGENT_DISPLAY (-8)
+
+/**
+ * pixel format definitions
+ */
+
+enum {
+ /*
+ * "linear" color pixel formats:
+ *
+ * The pixel formats below contain sRGB data but are otherwise treated
+ * as linear formats, i.e.: no special operation is performed when
+ * reading or writing into a buffer in one of these formats
+ */
+ HAL_PIXEL_FORMAT_RGBA_8888 = 1,
+ HAL_PIXEL_FORMAT_RGBX_8888 = 2,
+ HAL_PIXEL_FORMAT_RGB_888 = 3,
+ HAL_PIXEL_FORMAT_RGB_565 = 4,
+ HAL_PIXEL_FORMAT_BGRA_8888 = 5,
+
+ /*
+ * sRGB color pixel formats:
+ *
+ * The red, green and blue components are stored in sRGB space, and converted
+ * to linear space when read, using the standard sRGB to linear equation:
+ *
+ * Clinear = Csrgb / 12.92 for Csrgb <= 0.04045
+ * = ((Csrgb + 0.055) / 1.055)^2.4 for Csrgb > 0.04045
+ *
+ * When written the inverse transformation is performed:
+ *
+ * Csrgb = 12.92 * Clinear for Clinear <= 0.0031308
+ * = 1.055 * Clinear^(1/2.4) - 0.055 for Clinear > 0.0031308
+ *
+ *
+ * The alpha component, if present, is always stored in linear space and
+ * is left unmodified when read or written.
+ *
+ */
+ HAL_PIXEL_FORMAT_sRGB_A_8888 = 0xC,
+ HAL_PIXEL_FORMAT_sRGB_X_8888 = 0xD,
+
+ /*
+ * 0x100 - 0x1FF
+ *
+ * This range is reserved for pixel formats that are specific to the HAL
+ * implementation. Implementations can use any value in this range to
+ * communicate video pixel formats between their HAL modules. These formats
+ * must not have an alpha channel. Additionally, an EGLimage created from a
+ * gralloc buffer of one of these formats must be supported for use with the
+ * GL_OES_EGL_image_external OpenGL ES extension.
+ */
+
+ /*
+ * Android YUV format:
+ *
+ * This format is exposed outside of the HAL to software decoders and
+ * applications. EGLImageKHR must support it in conjunction with the
+ * OES_EGL_image_external extension.
+ *
+ * YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
+ * by (W/2) x (H/2) Cr and Cb planes.
+ *
+ * This format assumes
+ * - an even width
+ * - an even height
+ * - a horizontal stride multiple of 16 pixels
+ * - a vertical stride equal to the height
+ *
+ * y_size = stride * height
+ * c_stride = ALIGN(stride/2, 16)
+ * c_size = c_stride * height/2
+ * size = y_size + c_size * 2
+ * cr_offset = y_size
+ * cb_offset = y_size + c_size
+ *
+ */
+ HAL_PIXEL_FORMAT_YV12 = 0x32315659, // YCrCb 4:2:0 Planar
+
+
+ /*
+ * Android Y8 format:
+ *
+ * This format is exposed outside of the HAL to the framework.
+ * The expected gralloc usage flags are SW_* and HW_CAMERA_*,
+ * and no other HW_ flags will be used.
+ *
+ * Y8 is a YUV planar format comprised of a WxH Y plane,
+ * with each pixel being represented by 8 bits.
+ *
+ * It is equivalent to just the Y plane from YV12.
+ *
+ * This format assumes
+ * - an even width
+ * - an even height
+ * - a horizontal stride multiple of 16 pixels
+ * - a vertical stride equal to the height
+ *
+ * size = stride * height
+ *
+ */
+ HAL_PIXEL_FORMAT_Y8 = 0x20203859,
+
+ /*
+ * Android Y16 format:
+ *
+ * This format is exposed outside of the HAL to the framework.
+ * The expected gralloc usage flags are SW_* and HW_CAMERA_*,
+ * and no other HW_ flags will be used.
+ *
+ * Y16 is a YUV planar format comprised of a WxH Y plane,
+ * with each pixel being represented by 16 bits.
+ *
+ * It is just like Y8, but has double the bits per pixel (little endian).
+ *
+ * This format assumes
+ * - an even width
+ * - an even height
+ * - a horizontal stride multiple of 16 pixels
+ * - a vertical stride equal to the height
+ * - strides are specified in pixels, not in bytes
+ *
+ * size = stride * height * 2
+ *
+ */
+ HAL_PIXEL_FORMAT_Y16 = 0x20363159,
+
+ /*
+ * Android RAW sensor format:
+ *
+ * This format is exposed outside of the camera HAL to applications.
+ *
+ * RAW_SENSOR is a single-channel, 16-bit, little endian format, typically
+ * representing raw Bayer-pattern images from an image sensor, with minimal
+ * processing.
+ *
+ * The exact pixel layout of the data in the buffer is sensor-dependent, and
+ * needs to be queried from the camera device.
+ *
+ * Generally, not all 16 bits are used; more common values are 10 or 12
+ * bits. If not all bits are used, the lower-order bits are filled first.
+ * All parameters to interpret the raw data (black and white points,
+ * color space, etc) must be queried from the camera device.
+ *
+ * This format assumes
+ * - an even width
+ * - an even height
+ * - a horizontal stride multiple of 16 pixels
+ * - a vertical stride equal to the height
+ * - strides are specified in pixels, not in bytes
+ *
+ * size = stride * height * 2
+ *
+ * This format must be accepted by the gralloc module when used with the
+ * following usage flags:
+ * - GRALLOC_USAGE_HW_CAMERA_*
+ * - GRALLOC_USAGE_SW_*
+ * - GRALLOC_USAGE_RENDERSCRIPT
+ */
+ HAL_PIXEL_FORMAT_RAW16 = 0x20,
+ HAL_PIXEL_FORMAT_RAW_SENSOR = 0x20, // TODO(rubenbrunk): Remove RAW_SENSOR.
+
+ /*
+ * Android RAW10 format:
+ *
+ * This format is exposed outside of the camera HAL to applications.
+ *
+ * RAW10 is a single-channel, 10-bit per pixel, densely packed, unprocessed
+ * format, representing raw Bayer-pattern images coming from an image sensor.
+ *
+ * In an image buffer with this format, starting from the first pixel, each
+ * group of 4 consecutive pixels is packed into 5 bytes (40 bits). Each of the
+ * first 4 bytes contains the top 8 bits of one pixel, and the fifth byte
+ * contains the 2 least significant bits of all 4 pixels. The exact layout of
+ * each group of 4 consecutive pixels is illustrated below (Pi[j] stands for
+ * the jth bit of the ith pixel); an unpacking sketch follows this enum.
+ *
+ * bit 7 bit 0
+ * =====|=====|=====|=====|=====|=====|=====|=====|
+ * Byte 0: |P0[9]|P0[8]|P0[7]|P0[6]|P0[5]|P0[4]|P0[3]|P0[2]|
+ * |-----|-----|-----|-----|-----|-----|-----|-----|
+ * Byte 1: |P1[9]|P1[8]|P1[7]|P1[6]|P1[5]|P1[4]|P1[3]|P1[2]|
+ * |-----|-----|-----|-----|-----|-----|-----|-----|
+ * Byte 2: |P2[9]|P2[8]|P2[7]|P2[6]|P2[5]|P2[4]|P2[3]|P2[2]|
+ * |-----|-----|-----|-----|-----|-----|-----|-----|
+ * Byte 3: |P3[9]|P3[8]|P3[7]|P3[6]|P3[5]|P3[4]|P3[3]|P3[2]|
+ * |-----|-----|-----|-----|-----|-----|-----|-----|
+ * Byte 4: |P3[1]|P3[0]|P2[1]|P2[0]|P1[1]|P1[0]|P0[1]|P0[0]|
+ * ===============================================
+ *
+ * This format assumes
+ * - a width multiple of 4 pixels
+ * - an even height
+ * - a horizontal stride equal to the width
+ * - a vertical stride equal to the height
+ * - strides are specified in pixels, not in bytes
+ *
+ * size = stride * height * 10 / 8
+ *
+ * This format must be accepted by the gralloc module when used with the
+ * following usage flags:
+ * - GRALLOC_USAGE_HW_CAMERA_*
+ * - GRALLOC_USAGE_SW_*
+ * - GRALLOC_USAGE_RENDERSCRIPT
+ */
+ HAL_PIXEL_FORMAT_RAW10 = 0x25,
+
+ /*
+ * Android opaque RAW format:
+ *
+ * This format is exposed outside of the camera HAL to applications.
+ *
+ * RAW_OPAQUE is a format for unprocessed raw image buffers coming from an
+ * image sensor. The actual structure of buffers of this format is
+ * implementation-dependent.
+ *
+ * This format must be accepted by the gralloc module when used with the
+ * following usage flags:
+ * - GRALLOC_USAGE_HW_CAMERA_*
+ * - GRALLOC_USAGE_SW_*
+ * - GRALLOC_USAGE_RENDERSCRIPT
+ */
+ HAL_PIXEL_FORMAT_RAW_OPAQUE = 0x24,
+
+ /*
+ * Android binary blob graphics buffer format:
+ *
+ * This format is used to carry task-specific data which does not have a
+ * standard image structure. The details of the format are left to the two
+ * endpoints.
+ *
+ * A typical use case is for transporting JPEG-compressed images from the
+ * Camera HAL to the framework or to applications.
+ *
+ * Buffers of this format must have a height of 1, and width equal to their
+ * size in bytes.
+ */
+ HAL_PIXEL_FORMAT_BLOB = 0x21,
+
+ /*
+ * Android format indicating that the choice of format is entirely up to the
+ * device-specific Gralloc implementation.
+ *
+ * The Gralloc implementation should examine the usage bits passed in when
+ * allocating a buffer with this format, and it should derive the pixel
+ * format from those usage flags. This format will never be used with any
+ * of the GRALLOC_USAGE_SW_* usage flags.
+ *
+ * If a buffer of this format is to be used as an OpenGL ES texture, the
+ * framework will assume that sampling the texture will always return an
+ * alpha value of 1.0 (i.e. the buffer contains only opaque pixel values).
+ *
+ */
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22,
+
+ /*
+ * Android flexible YCbCr formats
+ *
+ * This format allows platforms to use an efficient YCbCr/YCrCb buffer
+ * layout, while still describing the buffer layout in a way accessible to
+ * the CPU in a device-independent manner. While called YCbCr, it can be
+ * used to describe formats with either chromatic ordering, as well as
+ * whole planar or semiplanar layouts.
+ *
+ * struct android_ycbcr (below) is the struct used to describe it.
+ *
+ * This format must be accepted by the gralloc module when
+ * USAGE_HW_CAMERA_WRITE and USAGE_SW_READ_* are set.
+ *
+ * This format is locked for use by gralloc's (*lock_ycbcr) method, and
+ * locking with the (*lock) method will return an error.
+ */
+ HAL_PIXEL_FORMAT_YCbCr_420_888 = 0x23,
+
+ /* Legacy formats (deprecated), used by ImageFormat.java */
+ HAL_PIXEL_FORMAT_YCbCr_422_SP = 0x10, // NV16
+ HAL_PIXEL_FORMAT_YCrCb_420_SP = 0x11, // NV21
+ HAL_PIXEL_FORMAT_YCbCr_422_I = 0x14, // YUY2
+};
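+
+/*
+ * Illustrative sketches (not part of the original header): the YV12 and RAW10
+ * size formulas documented above, spelled out as helpers. The helper names are
+ * local to this example and are not a gralloc API.
+ */
+
+/* YV12: WxH Y plane followed by (W/2) x (H/2) Cr and Cb planes. */
+static inline size_t example_yv12_buffer_size(size_t stride, size_t height)
+{
+    size_t y_size = stride * height;
+    size_t c_stride = ((stride / 2) + 15) & ~(size_t)15;   /* ALIGN(stride/2, 16) */
+    size_t c_size = c_stride * (height / 2);
+    /* Cr plane starts at y_size, Cb plane at y_size + c_size */
+    return y_size + 2 * c_size;
+}
+
+/* RAW10: 4 pixels densely packed into 5 bytes; stride is given in pixels. */
+static inline size_t example_raw10_buffer_size(size_t stride, size_t height)
+{
+    return stride * height * 10 / 8;
+}
+
+/* RAW10: recover the 10-bit value of pixel i (0..3) of one packed 5-byte group. */
+static inline unsigned int example_raw10_unpack_pixel(const uint8_t group[5],
+                                                      unsigned int i)
+{
+    return ((unsigned int)group[i] << 2) | ((group[4] >> (2 * i)) & 0x3u);
+}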
+
+/*
+ * Structure for describing YCbCr formats for consumption by applications.
+ * This is used with HAL_PIXEL_FORMAT_YCbCr_*_888.
+ *
+ * Buffer chroma subsampling is defined in the format.
+ * e.g. HAL_PIXEL_FORMAT_YCbCr_420_888 has subsampling 4:2:0.
+ *
+ * Buffers must have an 8-bit depth.
+ *
+ * @y, @cb, and @cr point to the first byte of their respective planes.
+ *
+ * Stride describes the distance in bytes from the first value of one row of
+ * the image to the first value of the next row. It includes the width of the
+ * image plus padding.
+ * @ystride is the stride of the luma plane.
+ * @cstride is the stride of the chroma planes.
+ *
+ * @chroma_step is the distance in bytes from one chroma pixel value to the
+ * next. This is 2 bytes for semiplanar (because chroma values are interleaved
+ * and each chroma value is one byte) and 1 for planar.
+ */
+
+struct android_ycbcr {
+ void *y;
+ void *cb;
+ void *cr;
+ size_t ystride;
+ size_t cstride;
+ size_t chroma_step;
+
+ /** reserved for future use, set to 0 by gralloc's (*lock_ycbcr)() */
+ uint32_t reserved[8];
+};
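+
+/*
+ * Illustrative sketch (not part of the original header): locating the samples
+ * for pixel (x, y) of a HAL_PIXEL_FORMAT_YCbCr_420_888 buffer described by
+ * struct android_ycbcr. With 4:2:0 subsampling the chroma planes are indexed
+ * at (x/2, y/2); chroma_step covers both planar (1) and semiplanar (2) layouts.
+ */
+static inline uint8_t* example_ycbcr_y(const struct android_ycbcr* img,
+                                       size_t x, size_t y)
+{
+    return (uint8_t*)img->y + y * img->ystride + x;
+}
+
+static inline uint8_t* example_ycbcr_cb(const struct android_ycbcr* img,
+                                        size_t x, size_t y)
+{
+    return (uint8_t*)img->cb + (y / 2) * img->cstride + (x / 2) * img->chroma_step;
+}
+
+static inline uint8_t* example_ycbcr_cr(const struct android_ycbcr* img,
+                                        size_t x, size_t y)
+{
+    return (uint8_t*)img->cr + (y / 2) * img->cstride + (x / 2) * img->chroma_step;
+}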
+
+/**
+ * Transformation definitions
+ *
+ * IMPORTANT NOTE:
+ * HAL_TRANSFORM_ROT_90 is applied CLOCKWISE and AFTER HAL_TRANSFORM_FLIP_{H|V}.
+ *
+ */
+
+enum {
+ /* flip source image horizontally (around the vertical axis) */
+ HAL_TRANSFORM_FLIP_H = 0x01,
+ /* flip source image vertically (around the horizontal axis)*/
+ HAL_TRANSFORM_FLIP_V = 0x02,
+ /* rotate source image 90 degrees clockwise */
+ HAL_TRANSFORM_ROT_90 = 0x04,
+ /* rotate source image 180 degrees */
+ HAL_TRANSFORM_ROT_180 = 0x03,
+ /* rotate source image 270 degrees clockwise */
+ HAL_TRANSFORM_ROT_270 = 0x07,
+ /* don't use. see system/window.h */
+ HAL_TRANSFORM_RESERVED = 0x08,
+};
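+
+/*
+ * Illustrative sketch (not part of the original header): applying a
+ * HAL_TRANSFORM_* value to a pixel coordinate of a w x h source image,
+ * following the rule above that the flips are applied first and ROT_90
+ * (clockwise) last. Note that ROT_180 (0x03) is FLIP_H | FLIP_V and
+ * ROT_270 (0x07) is FLIP_H | FLIP_V | ROT_90.
+ */
+static inline void example_hal_transform_point(int transform, int w, int h,
+                                               int x, int y,
+                                               int* out_x, int* out_y)
+{
+    if (transform & HAL_TRANSFORM_FLIP_H)
+        x = w - 1 - x;
+    if (transform & HAL_TRANSFORM_FLIP_V)
+        y = h - 1 - y;
+    if (transform & HAL_TRANSFORM_ROT_90) {
+        /* 90 degrees clockwise: (x, y) in a w x h image maps to
+         * (h - 1 - y, x) in the resulting h x w image */
+        int tmp = x;
+        x = h - 1 - y;
+        y = tmp;
+    }
+    *out_x = x;
+    *out_y = y;
+}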
+
+/**
+ * Colorspace Definitions
+ * ======================
+ *
+ * Colorspace is the definition of how pixel values should be interpreted.
+ * It includes primaries (including white point) and the transfer
+ * characteristic function, which describes both gamma curve and numeric
+ * range (within the bit depth).
+ */
+
+enum {
+ /*
+ * Arbitrary colorspace with manually defined characteristics.
+ * Colorspace definition must be communicated separately.
+ *
+ * This is used when specifying primaries, transfer characteristics,
+ * etc. separately.
+ *
+ * A typical use case is in video encoding parameters (e.g. for H.264),
+ * where a colorspace can have separately defined primaries, transfer
+ * characteristics, etc.
+ */
+ HAL_COLORSPACE_ARBITRARY = 0x1,
+
+ /*
+ * YCbCr Colorspaces
+ * -----------------
+ *
+ * Primaries are given using (x,y) coordinates in the CIE 1931 definition
+ * of x and y specified by ISO 11664-1.
+ *
+ * Transfer characteristics are the opto-electronic transfer characteristic
+ * at the source as a function of linear optical intensity (luminance).
+ */
+
+ /*
+ * JPEG File Interchange Format (JFIF)
+ *
+ * Same model as BT.601-625, but all values (Y, Cb, Cr) range from 0 to 255
+ *
+ * Transfer characteristic curve:
+ * E = 1.099 * L ^ 0.45 - 0.099, 1.00 >= L >= 0.018
+ * E = 4.500 L, 0.018 > L >= 0
+ * L - luminance of image 0 <= L <= 1 for conventional colorimetry
+ * E - corresponding electrical signal
+ *
+ * Primaries: x y
+ * green 0.290 0.600
+ * blue 0.150 0.060
+ * red 0.640 0.330
+ * white (D65) 0.3127 0.3290
+ */
+ HAL_COLORSPACE_JFIF = 0x101,
+
+ /*
+ * ITU-R Recommendation 601 (BT.601) - 625-line
+ *
+ * Standard-definition television, 625 Lines (PAL)
+ *
+ * For 8-bit-depth formats:
+ * Luma (Y) samples should range from 16 to 235, inclusive
+ * Chroma (Cb, Cr) samples should range from 16 to 240, inclusive
+ *
+ * For 10-bit-depth formats:
+ * Luma (Y) samples should range from 64 to 940, inclusive
+ * Chroma (Cb, Cr) samples should range from 64 to 960, inclusive
+ *
+ * Transfer characteristic curve:
+ * E = 1.099 * L ^ 0.45 - 0.099, 1.00 >= L >= 0.018
+ * E = 4.500 L, 0.018 > L >= 0
+ * L - luminance of image 0 <= L <= 1 for conventional colorimetry
+ * E - corresponding electrical signal
+ *
+ * Primaries: x y
+ * green 0.290 0.600
+ * blue 0.150 0.060
+ * red 0.640 0.330
+ * white (D65) 0.3127 0.3290
+ */
+ HAL_COLORSPACE_BT601_625 = 0x102,
+
+ /*
+ * ITU-R Recommendation 601 (BT.601) - 525-line
+ *
+ * Standard-definition television, 525 Lines (NTSC)
+ *
+ * For 8-bit-depth formats:
+ * Luma (Y) samples should range from 16 to 235, inclusive
+ * Chroma (Cb, Cr) samples should range from 16 to 240, inclusive
+ *
+ * For 10-bit-depth formats:
+ * Luma (Y) samples should range from 64 to 940, inclusive
+ * Chroma (Cb, Cr) samples should range from 64 to 960, inclusive
+ *
+ * Transfer characteristic curve:
+ * E = 1.099 * L ^ 0.45 - 0.099, 1.00 >= L >= 0.018
+ * E = 4.500 L, 0.018 > L >= 0
+ * L - luminance of image 0 <= L <= 1 for conventional colorimetry
+ * E - corresponding electrical signal
+ *
+ * Primaries: x y
+ * green 0.310 0.595
+ * blue 0.155 0.070
+ * red 0.630 0.340
+ * white (D65) 0.3127 0.3290
+ */
+ HAL_COLORSPACE_BT601_525 = 0x103,
+
+ /*
+ * ITU-R Recommendation 709 (BT.709)
+ *
+ * High-definition television
+ *
+ * For 8-bit-depth formats:
+ * Luma (Y) samples should range from 16 to 235, inclusive
+ * Chroma (Cb, Cr) samples should range from 16 to 240, inclusive
+ *
+ * For 10-bit-depth formats:
+ * Luma (Y) samples should range from 64 to 940, inclusive
+ * Chroma (Cb, Cr) samples should range from 64 to 960, inclusive
+ *
+ * Primaries: x y
+ * green 0.300 0.600
+ * blue 0.150 0.060
+ * red 0.640 0.330
+ * white (D65) 0.3127 0.3290
+ */
+ HAL_COLORSPACE_BT709 = 0x104,
+};
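+
+/*
+ * Illustrative sketch (not part of the original header): the opto-electronic
+ * transfer characteristic shared by the JFIF and BT.601 colorspaces above,
+ * mapping linear luminance L in [0, 1] to the electrical signal E.
+ * Assumes <math.h> for powf().
+ */
+static inline float example_bt601_oetf(float L)
+{
+    if (L < 0.018f)
+        return 4.500f * L;
+    return 1.099f * powf(L, 0.45f) - 0.099f;
+}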
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SYSTEM_CORE_INCLUDE_ANDROID_GRAPHICS_H */
diff --git a/package/utils/adbd/src/include/system/sound_trigger.h b/package/utils/adbd/src/include/system/sound_trigger.h
new file mode 100644
index 0000000..0270baa
--- /dev/null
+++ b/package/utils/adbd/src/include/system/sound_trigger.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SOUND_TRIGGER_H
+#define ANDROID_SOUND_TRIGGER_H
+
+#include <stdbool.h>
+
+#define SOUND_TRIGGER_MAX_STRING_LEN 64 /* max length of strings in properties or
+ descriptor structs */
+#define SOUND_TRIGGER_MAX_LOCALE_LEN 6 /* max length of locale string. e.g en_US */
+#define SOUND_TRIGGER_MAX_USERS 10 /* max number of concurrent users */
+#define SOUND_TRIGGER_MAX_PHRASES 10 /* max number of concurrent phrases */
+
+#define RECOGNITION_MODE_VOICE_TRIGGER 0x1 /* simple voice trigger */
+#define RECOGNITION_MODE_USER_IDENTIFICATION 0x2 /* trigger only if one user in the model is identified */
+#define RECOGNITION_MODE_USER_AUTHENTICATION 0x4 /* trigger only if one user in the model is
+ authenticated */
+
+#define RECOGNITION_STATUS_SUCCESS 0
+#define RECOGNITION_STATUS_ABORT 1
+#define RECOGNITION_STATUS_FAILURE 2
+
+
+typedef enum {
+ SOUND_MODEL_TYPE_UNKNOWN = -1, /* use for unspecified sound model type */
+ SOUND_MODEL_TYPE_KEYPHRASE = 0 /* use for key phrase sound models */
+} sound_trigger_sound_model_type_t;
+
+
+typedef struct sound_trigger_uuid_s {
+ unsigned int timeLow;
+ unsigned short timeMid;
+ unsigned short timeHiAndVersion;
+ unsigned short clockSeq;
+ unsigned char node[6];
+} sound_trigger_uuid_t;
+
+/*
+ * sound trigger implementation descriptor read by the framework via get_properties().
+ * Used by SoundTrigger service to report to applications and manage concurrency and policy.
+ */
+struct sound_trigger_properties {
+ char implementor[SOUND_TRIGGER_MAX_STRING_LEN]; /* implementor name */
+ char description[SOUND_TRIGGER_MAX_STRING_LEN]; /* implementation description */
+ unsigned int version; /* implementation version */
+ sound_trigger_uuid_t uuid; /* unique implementation ID.
+ Must change with each version */
+ unsigned int max_sound_models; /* maximum number of concurrent sound models
+ loaded */
+ unsigned int max_key_phrases; /* maximum number of key phrases */
+ unsigned int max_users; /* maximum number of concurrent users detected */
+ unsigned int recognition_modes; /* all supported modes.
+ e.g RECOGNITION_MODE_VOICE_TRIGGER */
+ bool capture_transition; /* supports seamless transition from detection
+ to capture */
+ unsigned int max_buffer_ms; /* maximum buffering capacity in ms if
+ capture_transition is true*/
+ bool concurrent_capture; /* supports capture by other use cases while
+ detection is active */
+ unsigned int power_consumption_mw; /* Rated power consumption when detection is active
+ with TBD silence/sound/speech ratio */
+};
+
+typedef int sound_trigger_module_handle_t;
+
+struct sound_trigger_module_descriptor {
+ sound_trigger_module_handle_t handle;
+ struct sound_trigger_properties properties;
+};
+
+typedef int sound_model_handle_t;
+
+/*
+ * Generic sound model descriptor. This struct is the header of a larger block passed to
+ * load_sound_model() and containing the binary data of the sound model.
+ * Proprietary representation of users in binary data must match information indicated
+ * by users field
+ */
+struct sound_trigger_sound_model {
+ sound_trigger_sound_model_type_t type; /* model type. e.g. SOUND_MODEL_TYPE_KEYPHRASE */
+ unsigned int data_size; /* size of opaque model data */
+ unsigned int data_offset; /* offset of opaque data start from head of struct
+ (e.g. sizeof(struct sound_trigger_sound_model)) */
+};
+
+/* key phrase descriptor */
+struct sound_trigger_phrase {
+ unsigned int recognition_mode; /* recognition modes supported by this key phrase */
+ unsigned int num_users; /* number of users in the key phrase */
+ char locale[SOUND_TRIGGER_MAX_LOCALE_LEN]; /* locale - JAVA Locale style (e.g. en_US) */
+ char text[SOUND_TRIGGER_MAX_STRING_LEN]; /* phrase text in UTF-8 format. */
+};
+
+/*
+ * Specialized sound model for key phrase detection.
+ * Proprietary representation of key phrases in binary data must match information indicated
+ * by phrases field
+ */
+struct sound_trigger_phrase_sound_model {
+ struct sound_trigger_sound_model common;
+ unsigned int num_phrases; /* number of key phrases in model */
+ struct sound_trigger_phrase phrases[SOUND_TRIGGER_MAX_PHRASES];
+};
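+
+/*
+ * Illustrative sketch (not part of the original header): building a key phrase
+ * sound model with its opaque binary data appended after the descriptor, as
+ * described by the data_size/data_offset comments above. Assumes <stdlib.h>
+ * and <string.h>; the opaque model data is a hypothetical input.
+ */
+static inline struct sound_trigger_phrase_sound_model*
+example_alloc_phrase_sound_model(const void* opaque_data, unsigned int opaque_size)
+{
+    struct sound_trigger_phrase_sound_model* model =
+        (struct sound_trigger_phrase_sound_model*)calloc(1, sizeof(*model) + opaque_size);
+    if (model == (struct sound_trigger_phrase_sound_model*)0)
+        return model;
+    model->common.type = SOUND_MODEL_TYPE_KEYPHRASE;
+    model->common.data_size = opaque_size;
+    /* opaque data starts right after the fixed-size descriptor */
+    model->common.data_offset = sizeof(*model);
+    memcpy((char*)model + model->common.data_offset, opaque_data, opaque_size);
+    return model;
+}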
+
+
+/*
+ * Generic recognition event sent via recognition callback
+ */
+struct sound_trigger_recognition_event {
+ int status; /* recognition status e.g.
+ RECOGNITION_STATUS_SUCCESS */
+ sound_trigger_sound_model_type_t type; /* event type, same as sound model type.
+ e.g. SOUND_MODEL_TYPE_KEYPHRASE */
+ sound_model_handle_t model; /* loaded sound model that triggered the
+ event */
+ bool capture_available; /* true if it is possible to capture audio
+ from this utterance, as buffered by the
+ implementation */
+ int capture_session; /* audio session ID. framework use */
+ int capture_delay_ms; /* delay in ms between end of model
+ detection and start of audio available
+ for capture. A negative value is possible
+ (e.g. if the key phrase is also available
+ for capture) */
+ unsigned int data_size; /* size of opaque event data */
+ unsigned int data_offset; /* offset of opaque data start from start of
+ this struct (e.g sizeof struct
+ sound_trigger_phrase_recognition_event) */
+};
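+
+/*
+ * Illustrative sketch (not part of the original header): locating the opaque
+ * payload that follows a recognition event, using the data_offset/data_size
+ * fields documented above.
+ */
+static inline const void*
+example_recognition_event_data(const struct sound_trigger_recognition_event* event)
+{
+    return event->data_size != 0 ? (const void*)((const char*)event + event->data_offset)
+                                 : (const void*)0;
+}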
+
+/*
+ * Specialized recognition event for key phrase detection
+ */
+struct sound_trigger_phrase_recognition_extra {
+ unsigned int recognition_modes;
+ unsigned int num_users;
+ unsigned int confidence_levels[SOUND_TRIGGER_MAX_USERS];
+};
+
+struct sound_trigger_phrase_recognition_event {
+ struct sound_trigger_recognition_event common;
+ bool key_phrase_in_capture; /* true if the key phrase is
+ present in audio data available
+ for capture after the recognition
+ event is fired */
+ unsigned int num_phrases;
+ struct sound_trigger_phrase_recognition_extra phrase_extras[SOUND_TRIGGER_MAX_PHRASES];
+};
+
+
+#endif // ANDROID_SOUND_TRIGGER_H
diff --git a/package/utils/adbd/src/include/system/thread_defs.h b/package/utils/adbd/src/include/system/thread_defs.h
new file mode 100644
index 0000000..377a48c
--- /dev/null
+++ b/package/utils/adbd/src/include/system/thread_defs.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_THREAD_DEFS_H
+#define ANDROID_THREAD_DEFS_H
+
+#include "graphics.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+enum {
+ /*
+ * ***********************************************
+ * ** Keep in sync with android.os.Process.java **
+ * ***********************************************
+ *
+ * This maps directly to the "nice" priorities we use in Android.
+ * A thread priority should be chosen inverse-proportionally to
+ * the amount of work the thread is expected to do. The more work
+ * a thread will do, the less favorable priority it should get so that
+ * it doesn't starve the system. Threads not behaving properly might
+ * be "punished" by the kernel.
+ * Use the levels below when appropriate. Intermediate values are
+ * acceptable; prefer the {MORE|LESS}_FAVORABLE constants below.
+ */
+ ANDROID_PRIORITY_LOWEST = 19,
+
+ /* use for background tasks */
+ ANDROID_PRIORITY_BACKGROUND = 10,
+
+ /* most threads run at normal priority */
+ ANDROID_PRIORITY_NORMAL = 0,
+
+ /* threads currently running a UI that the user is interacting with */
+ ANDROID_PRIORITY_FOREGROUND = -2,
+
+ /* the main UI thread has a slightly more favorable priority */
+ ANDROID_PRIORITY_DISPLAY = -4,
+
+ /* UI service threads might want to run at an urgent display priority (uncommon) */
+ ANDROID_PRIORITY_URGENT_DISPLAY = HAL_PRIORITY_URGENT_DISPLAY,
+
+ /* all normal audio threads */
+ ANDROID_PRIORITY_AUDIO = -16,
+
+ /* service audio threads (uncommon) */
+ ANDROID_PRIORITY_URGENT_AUDIO = -19,
+
+ /* should never be used in practice. A regular process might not
+ * be allowed to use this level */
+ ANDROID_PRIORITY_HIGHEST = -20,
+
+ ANDROID_PRIORITY_DEFAULT = ANDROID_PRIORITY_NORMAL,
+ ANDROID_PRIORITY_MORE_FAVORABLE = -1,
+ ANDROID_PRIORITY_LESS_FAVORABLE = +1,
+};
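+
+/*
+ * Illustrative sketch (not part of the original header): these values are
+ * Linux "nice" levels, so native code can apply one with the standard
+ * setpriority() call. Assumes <sys/resource.h>; error handling omitted.
+ */
+static inline int example_drop_to_background_priority(void)
+{
+    /* who == 0 selects the caller */
+    return setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_BACKGROUND);
+}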
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* ANDROID_THREAD_DEFS_H */
diff --git a/package/utils/adbd/src/include/system/window.h b/package/utils/adbd/src/include/system/window.h
new file mode 100644
index 0000000..3779653
--- /dev/null
+++ b/package/utils/adbd/src/include/system/window.h
@@ -0,0 +1,864 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SYSTEM_CORE_INCLUDE_ANDROID_WINDOW_H
+#define SYSTEM_CORE_INCLUDE_ANDROID_WINDOW_H
+
+#include <cutils/native_handle.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdint.h>
+#include <string.h>
+#include <system/graphics.h>
+#include <unistd.h>
+
+#ifndef __UNUSED
+#define __UNUSED __attribute__((__unused__))
+#endif
+#ifndef __deprecated
+#define __deprecated __attribute__((__deprecated__))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*****************************************************************************/
+
+#define ANDROID_NATIVE_MAKE_CONSTANT(a,b,c,d) \
+ (((unsigned)(a)<<24)|((unsigned)(b)<<16)|((unsigned)(c)<<8)|(unsigned)(d))
+
+#define ANDROID_NATIVE_WINDOW_MAGIC \
+ ANDROID_NATIVE_MAKE_CONSTANT('_','w','n','d')
+
+#define ANDROID_NATIVE_BUFFER_MAGIC \
+ ANDROID_NATIVE_MAKE_CONSTANT('_','b','f','r')
+
+// ---------------------------------------------------------------------------
+
+// This #define may be used to conditionally compile device-specific code to
+// support either the prior ANativeWindow interface, which did not pass libsync
+// fences around, or the new interface that does. This #define is only present
+// when the ANativeWindow interface does include libsync support.
+#define ANDROID_NATIVE_WINDOW_HAS_SYNC 1
+
+// ---------------------------------------------------------------------------
+
+typedef const native_handle_t* buffer_handle_t;
+
+// ---------------------------------------------------------------------------
+
+typedef struct android_native_rect_t
+{
+ int32_t left;
+ int32_t top;
+ int32_t right;
+ int32_t bottom;
+} android_native_rect_t;
+
+// ---------------------------------------------------------------------------
+
+typedef struct android_native_base_t
+{
+ /* a magic value defined by the actual EGL native type */
+ int magic;
+
+ /* the sizeof() of the actual EGL native type */
+ int version;
+
+ void* reserved[4];
+
+ /* reference-counting interface */
+ void (*incRef)(struct android_native_base_t* base);
+ void (*decRef)(struct android_native_base_t* base);
+} android_native_base_t;
+
+typedef struct ANativeWindowBuffer
+{
+#ifdef __cplusplus
+ ANativeWindowBuffer() {
+ common.magic = ANDROID_NATIVE_BUFFER_MAGIC;
+ common.version = sizeof(ANativeWindowBuffer);
+ memset(common.reserved, 0, sizeof(common.reserved));
+ }
+
+ // Implement the methods that sp<ANativeWindowBuffer> expects so that it
+ // can be used to automatically refcount ANativeWindowBuffer's.
+ void incStrong(const void* /*id*/) const {
+ common.incRef(const_cast<android_native_base_t*>(&common));
+ }
+ void decStrong(const void* /*id*/) const {
+ common.decRef(const_cast<android_native_base_t*>(&common));
+ }
+#endif
+
+ struct android_native_base_t common;
+
+ int width;
+ int height;
+ int stride;
+ int format;
+ int usage;
+
+ void* reserved[2];
+
+ buffer_handle_t handle;
+
+ void* reserved_proc[8];
+} ANativeWindowBuffer_t;
+
+// Old typedef for backwards compatibility.
+typedef ANativeWindowBuffer_t android_native_buffer_t;
+
+// ---------------------------------------------------------------------------
+
+/* attributes queriable with query() */
+enum {
+ NATIVE_WINDOW_WIDTH = 0,
+ NATIVE_WINDOW_HEIGHT = 1,
+ NATIVE_WINDOW_FORMAT = 2,
+
+ /* The minimum number of buffers that must remain un-dequeued after a buffer
+ * has been queued. This value applies only if set_buffer_count was used to
+ * override the number of buffers and if a buffer has since been queued.
+ * Users of the set_buffer_count ANativeWindow method should query this
+ * value before calling set_buffer_count. If it is necessary to have N
+ * buffers simultaneously dequeued as part of the steady-state operation,
+ * and this query returns M then N+M buffers should be requested via
+ * native_window_set_buffer_count.
+ *
+ * Note that this value does NOT apply until a single buffer has been
+ * queued. In particular this means that it is possible to:
+ *
+ * 1. Query M = min undequeued buffers
+ * 2. Set the buffer count to N + M
+ * 3. Dequeue all N + M buffers
+ * 4. Cancel M buffers
+ * 5. Queue, dequeue, queue, dequeue, ad infinitum
+ */
+ NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS = 3,
+
+ /* Check whether queueBuffer operations on the ANativeWindow send the buffer
+ * to the window compositor. The query sets the returned 'value' argument
+ * to 1 if the ANativeWindow DOES send queued buffers directly to the window
+ * compositor and 0 if the buffers do not go directly to the window
+ * compositor.
+ *
+ * This can be used to determine whether protected buffer content should be
+ * sent to the ANativeWindow. Note, however, that a result of 1 does NOT
+ * indicate that queued buffers will be protected from applications or users
+ * capturing their contents. If that behavior is desired then some other
+ * mechanism (e.g. the GRALLOC_USAGE_PROTECTED flag) should be used in
+ * conjunction with this query.
+ */
+ NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER = 4,
+
+ /* Get the concrete type of a ANativeWindow. See below for the list of
+ * possible return values.
+ *
+ * This query should not be used outside the Android framework and will
+ * likely be removed in the near future.
+ */
+ NATIVE_WINDOW_CONCRETE_TYPE = 5,
+
+
+ /*
+ * Default width and height of ANativeWindow buffers, these are the
+ * dimensions of the window buffers irrespective of the
+ * NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS call and match the native window
+ * size unless overridden by NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS.
+ */
+ NATIVE_WINDOW_DEFAULT_WIDTH = 6,
+ NATIVE_WINDOW_DEFAULT_HEIGHT = 7,
+
+ /*
+ * Transformation that will most likely be applied to buffers. This is only
+ * a hint; the actual transformation applied might be different.
+ *
+ * INTENDED USE:
+ *
+ * The transform hint can be used by a producer, for instance the GLES
+ * driver, to pre-rotate the rendering such that the final transformation
+ * in the composer is identity. This can be very useful when used in
+ * conjunction with the h/w composer HAL, in situations where it
+ * cannot handle arbitrary rotations.
+ *
+ * 1. Before dequeuing a buffer, the GL driver (or any other ANW client)
+ * queries the ANW for NATIVE_WINDOW_TRANSFORM_HINT.
+ *
+ * 2. The GL driver overrides the width and height of the ANW to
+ * account for NATIVE_WINDOW_TRANSFORM_HINT. This is done by querying
+ * NATIVE_WINDOW_DEFAULT_{WIDTH | HEIGHT}, swapping the dimensions
+ * according to NATIVE_WINDOW_TRANSFORM_HINT and calling
+ * native_window_set_buffers_dimensions().
+ *
+ * 3. The GL driver dequeues a buffer of the new pre-rotated size.
+ *
+ * 4. The GL driver renders to the buffer such that the image is
+ * already transformed, that is applying NATIVE_WINDOW_TRANSFORM_HINT
+ * to the rendering.
+ *
+ * 5. The GL driver calls native_window_set_transform to apply
+ * inverse transformation to the buffer it just rendered.
+ * In order to do this, the GL driver needs
+ * to calculate the inverse of NATIVE_WINDOW_TRANSFORM_HINT, this is
+ * done easily:
+ *
+ * int hintTransform, inverseTransform;
+ * query(..., NATIVE_WINDOW_TRANSFORM_HINT, &hintTransform);
+ * inverseTransform = hintTransform;
+ * if (hintTransform & HAL_TRANSFORM_ROT_90)
+ * inverseTransform ^= HAL_TRANSFORM_ROT_180;
+ *
+ *
+ * 6. The GL driver queues the pre-transformed buffer.
+ *
+ * 7. The composer combines the buffer transform with the display
+ * transform. If the buffer transform happens to cancel out the
+ * display transform then no rotation is needed.
+ *
+ */
+ NATIVE_WINDOW_TRANSFORM_HINT = 8,
+
+ /*
+ * Boolean that indicates whether the consumer is running more than
+ * one buffer behind the producer.
+ */
+ NATIVE_WINDOW_CONSUMER_RUNNING_BEHIND = 9,
+
+ /*
+ * The consumer gralloc usage bits currently set by the consumer.
+ * The values are defined in hardware/libhardware/include/gralloc.h.
+ */
+ NATIVE_WINDOW_CONSUMER_USAGE_BITS = 10
+};
+
+/* Valid operations for the (*perform)() hook.
+ *
+ * Values marked as 'deprecated' are supported, but have been superseded by
+ * other functionality.
+ *
+ * Values marked as 'private' should be considered private to the framework.
+ * HAL implementation code with access to an ANativeWindow should not use these,
+ * as it may not interact properly with the framework's use of the
+ * ANativeWindow.
+ */
+enum {
+ NATIVE_WINDOW_SET_USAGE = 0,
+ NATIVE_WINDOW_CONNECT = 1, /* deprecated */
+ NATIVE_WINDOW_DISCONNECT = 2, /* deprecated */
+ NATIVE_WINDOW_SET_CROP = 3, /* private */
+ NATIVE_WINDOW_SET_BUFFER_COUNT = 4,
+ NATIVE_WINDOW_SET_BUFFERS_GEOMETRY = 5, /* deprecated */
+ NATIVE_WINDOW_SET_BUFFERS_TRANSFORM = 6,
+ NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP = 7,
+ NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS = 8,
+ NATIVE_WINDOW_SET_BUFFERS_FORMAT = 9,
+ NATIVE_WINDOW_SET_SCALING_MODE = 10, /* private */
+ NATIVE_WINDOW_LOCK = 11, /* private */
+ NATIVE_WINDOW_UNLOCK_AND_POST = 12, /* private */
+ NATIVE_WINDOW_API_CONNECT = 13, /* private */
+ NATIVE_WINDOW_API_DISCONNECT = 14, /* private */
+ NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS = 15, /* private */
+ NATIVE_WINDOW_SET_POST_TRANSFORM_CROP = 16, /* private */
+};
+
+/* parameter for NATIVE_WINDOW_[API_][DIS]CONNECT */
+enum {
+ /* Buffers will be queued by EGL via eglSwapBuffers after being filled using
+ * OpenGL ES.
+ */
+ NATIVE_WINDOW_API_EGL = 1,
+
+ /* Buffers will be queued after being filled using the CPU
+ */
+ NATIVE_WINDOW_API_CPU = 2,
+
+ /* Buffers will be queued by Stagefright after being filled by a video
+ * decoder. The video decoder can either be a software or hardware decoder.
+ */
+ NATIVE_WINDOW_API_MEDIA = 3,
+
+ /* Buffers will be queued by the camera HAL.
+ */
+ NATIVE_WINDOW_API_CAMERA = 4,
+};
+
+/* parameter for NATIVE_WINDOW_SET_BUFFERS_TRANSFORM */
+enum {
+ /* flip source image horizontally */
+ NATIVE_WINDOW_TRANSFORM_FLIP_H = HAL_TRANSFORM_FLIP_H ,
+ /* flip source image vertically */
+ NATIVE_WINDOW_TRANSFORM_FLIP_V = HAL_TRANSFORM_FLIP_V,
+ /* rotate source image 90 degrees clock-wise, and is applied after TRANSFORM_FLIP_{H|V} */
+ NATIVE_WINDOW_TRANSFORM_ROT_90 = HAL_TRANSFORM_ROT_90,
+ /* rotate source image 180 degrees */
+ NATIVE_WINDOW_TRANSFORM_ROT_180 = HAL_TRANSFORM_ROT_180,
+ /* rotate source image 270 degrees clock-wise */
+ NATIVE_WINDOW_TRANSFORM_ROT_270 = HAL_TRANSFORM_ROT_270,
+ /* transforms source by the inverse transform of the screen it is displayed onto. This
+ * transform is applied last */
+ NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY = 0x08
+};
+
+/* parameter for NATIVE_WINDOW_SET_SCALING_MODE */
+enum {
+ /* the window content is not updated (frozen) until a buffer of
+ * the window size is received (enqueued)
+ */
+ NATIVE_WINDOW_SCALING_MODE_FREEZE = 0,
+ /* the buffer is scaled in both dimensions to match the window size */
+ NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW = 1,
+ /* the buffer is scaled uniformly such that the smaller dimension
+ * of the buffer matches the window size (cropping in the process)
+ */
+ NATIVE_WINDOW_SCALING_MODE_SCALE_CROP = 2,
+ /* the window is clipped to the size of the buffer's crop rectangle; pixels
+ * outside the crop rectangle are treated as if they are completely
+ * transparent.
+ */
+ NATIVE_WINDOW_SCALING_MODE_NO_SCALE_CROP = 3,
+};
+
+/* values returned by the NATIVE_WINDOW_CONCRETE_TYPE query */
+enum {
+ NATIVE_WINDOW_FRAMEBUFFER = 0, /* FramebufferNativeWindow */
+ NATIVE_WINDOW_SURFACE = 1, /* Surface */
+};
+
+/* parameter for NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP
+ *
+ * Special timestamp value to indicate that timestamps should be auto-generated
+ * by the native window when queueBuffer is called. This is equal to INT64_MIN,
+ * defined directly to avoid problems with C99/C++ inclusion of stdint.h.
+ */
+static const int64_t NATIVE_WINDOW_TIMESTAMP_AUTO = (-9223372036854775807LL-1);
+
+struct ANativeWindow
+{
+#ifdef __cplusplus
+ ANativeWindow()
+ : flags(0), minSwapInterval(0), maxSwapInterval(0), xdpi(0), ydpi(0)
+ {
+ common.magic = ANDROID_NATIVE_WINDOW_MAGIC;
+ common.version = sizeof(ANativeWindow);
+ memset(common.reserved, 0, sizeof(common.reserved));
+ }
+
+ /* Implement the methods that sp<ANativeWindow> expects so that it
+ can be used to automatically refcount ANativeWindow's. */
+ void incStrong(const void* /*id*/) const {
+ common.incRef(const_cast<android_native_base_t*>(&common));
+ }
+ void decStrong(const void* /*id*/) const {
+ common.decRef(const_cast<android_native_base_t*>(&common));
+ }
+#endif
+
+ struct android_native_base_t common;
+
+ /* flags describing some attributes of this surface or its updater */
+ const uint32_t flags;
+
+ /* min swap interval supported by this updater */
+ const int minSwapInterval;
+
+ /* max swap interval supported by this updater */
+ const int maxSwapInterval;
+
+ /* horizontal and vertical resolution in DPI */
+ const float xdpi;
+ const float ydpi;
+
+ /* Some storage reserved for the OEM's driver. */
+ intptr_t oem[4];
+
+ /*
+ * Set the swap interval for this surface.
+ *
+ * Returns 0 on success or -errno on error.
+ */
+ int (*setSwapInterval)(struct ANativeWindow* window,
+ int interval);
+
+ /*
+ * Hook called by EGL to acquire a buffer. After this call, the buffer
+ * is not locked, so its content cannot be modified. This call may block if
+ * no buffers are available.
+ *
+ * The window holds a reference to the buffer between dequeueBuffer and
+ * either queueBuffer or cancelBuffer, so clients only need their own
+ * reference if they might use the buffer after queueing or canceling it.
+ * Holding a reference to a buffer after queueing or canceling it is only
+ * allowed if a specific buffer count has been set.
+ *
+ * Returns 0 on success or -errno on error.
+ *
+ * XXX: This function is deprecated. It will continue to work for some
+ * time for binary compatibility, but the new dequeueBuffer function that
+ * outputs a fence file descriptor should be used in its place.
+ */
+ int (*dequeueBuffer_DEPRECATED)(struct ANativeWindow* window,
+ struct ANativeWindowBuffer** buffer);
+
+ /*
+ * hook called by EGL to lock a buffer. This MUST be called before modifying
+ * the content of a buffer. The buffer must have been acquired with
+ * dequeueBuffer first.
+ *
+ * Returns 0 on success or -errno on error.
+ *
+ * XXX: This function is deprecated. It will continue to work for some
+ * time for binary compatibility, but it is essentially a no-op, and calls
+ * to it should be removed.
+ */
+ int (*lockBuffer_DEPRECATED)(struct ANativeWindow* window,
+ struct ANativeWindowBuffer* buffer);
+
+ /*
+ * Hook called by EGL when modifications to the render buffer are done.
+ * This unlocks and posts the buffer.
+ *
+ * The window holds a reference to the buffer between dequeueBuffer and
+ * either queueBuffer or cancelBuffer, so clients only need their own
+ * reference if they might use the buffer after queueing or canceling it.
+ * Holding a reference to a buffer after queueing or canceling it is only
+ * allowed if a specific buffer count has been set.
+ *
+ * Buffers MUST be queued in the same order as they were dequeued.
+ *
+ * Returns 0 on success or -errno on error.
+ *
+ * XXX: This function is deprecated. It will continue to work for some
+ * time for binary compatibility, but the new queueBuffer function that
+ * takes a fence file descriptor should be used in its place (pass a value
+ * of -1 for the fence file descriptor if there is no valid one to pass).
+ */
+ int (*queueBuffer_DEPRECATED)(struct ANativeWindow* window,
+ struct ANativeWindowBuffer* buffer);
+
+ /*
+ * hook used to retrieve information about the native window.
+ *
+ * Returns 0 on success or -errno on error.
+ */
+ int (*query)(const struct ANativeWindow* window,
+ int what, int* value);
+
+ /*
+ * hook used to perform various operations on the surface.
+ * (*perform)() is a generic mechanism to add functionality to
+ * ANativeWindow while keeping backward binary compatibility.
+ *
+ * DO NOT CALL THIS HOOK DIRECTLY. Instead, use the helper functions
+ * defined below.
+ *
+ * (*perform)() returns -ENOENT if the 'what' parameter is not supported
+ * by the surface's implementation.
+ *
+ * The valid operations are:
+ * NATIVE_WINDOW_SET_USAGE
+ * NATIVE_WINDOW_CONNECT (deprecated)
+ * NATIVE_WINDOW_DISCONNECT (deprecated)
+ * NATIVE_WINDOW_SET_CROP (private)
+ * NATIVE_WINDOW_SET_BUFFER_COUNT
+ * NATIVE_WINDOW_SET_BUFFERS_GEOMETRY (deprecated)
+ * NATIVE_WINDOW_SET_BUFFERS_TRANSFORM
+ * NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP
+ * NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS
+ * NATIVE_WINDOW_SET_BUFFERS_FORMAT
+ * NATIVE_WINDOW_SET_SCALING_MODE (private)
+ * NATIVE_WINDOW_LOCK (private)
+ * NATIVE_WINDOW_UNLOCK_AND_POST (private)
+ * NATIVE_WINDOW_API_CONNECT (private)
+ * NATIVE_WINDOW_API_DISCONNECT (private)
+ * NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS (private)
+ * NATIVE_WINDOW_SET_POST_TRANSFORM_CROP (private)
+ *
+ */
+
+ int (*perform)(struct ANativeWindow* window,
+ int operation, ... );
+
+ /*
+ * Hook used to cancel a buffer that has been dequeued.
+ * No synchronization is performed between dequeue() and cancel(), so
+ * either external synchronization is needed, or these functions must be
+ * called from the same thread.
+ *
+ * The window holds a reference to the buffer between dequeueBuffer and
+ * either queueBuffer or cancelBuffer, so clients only need their own
+ * reference if they might use the buffer after queueing or canceling it.
+ * Holding a reference to a buffer after queueing or canceling it is only
+ * allowed if a specific buffer count has been set.
+ *
+ * XXX: This function is deprecated. It will continue to work for some
+ * time for binary compatibility, but the new cancelBuffer function that
+ * takes a fence file descriptor should be used in its place (pass a value
+ * of -1 for the fence file descriptor if there is no valid one to pass).
+ */
+ int (*cancelBuffer_DEPRECATED)(struct ANativeWindow* window,
+ struct ANativeWindowBuffer* buffer);
+
+ /*
+ * Hook called by EGL to acquire a buffer. This call may block if no
+ * buffers are available.
+ *
+ * The window holds a reference to the buffer between dequeueBuffer and
+ * either queueBuffer or cancelBuffer, so clients only need their own
+ * reference if they might use the buffer after queueing or canceling it.
+ * Holding a reference to a buffer after queueing or canceling it is only
+ * allowed if a specific buffer count has been set.
+ *
+ * The libsync fence file descriptor returned in the int pointed to by the
+ * fenceFd argument will refer to the fence that must signal before the
+ * dequeued buffer may be written to. A value of -1 indicates that the
+ * caller may access the buffer immediately without waiting on a fence. If
+ * a valid file descriptor is returned (i.e. any value except -1) then the
+ * caller is responsible for closing the file descriptor.
+ *
+ * Returns 0 on success or -errno on error.
+ */
+ int (*dequeueBuffer)(struct ANativeWindow* window,
+ struct ANativeWindowBuffer** buffer, int* fenceFd);
+
+ /*
+ * Hook called by EGL when modifications to the render buffer are done.
+ * This unlocks and posts the buffer.
+ *
+ * The window holds a reference to the buffer between dequeueBuffer and
+ * either queueBuffer or cancelBuffer, so clients only need their own
+ * reference if they might use the buffer after queueing or canceling it.
+ * Holding a reference to a buffer after queueing or canceling it is only
+ * allowed if a specific buffer count has been set.
+ *
+ * The fenceFd argument specifies a libsync fence file descriptor for a
+ * fence that must signal before the buffer can be accessed. If the buffer
+ * can be accessed immediately then a value of -1 should be used. The
+ * caller must not use the file descriptor after it is passed to
+ * queueBuffer, and the ANativeWindow implementation is responsible for
+ * closing it.
+ *
+ * Returns 0 on success or -errno on error.
+ */
+ int (*queueBuffer)(struct ANativeWindow* window,
+ struct ANativeWindowBuffer* buffer, int fenceFd);
+
+ /*
+ * Hook used to cancel a buffer that has been dequeued.
+ * No synchronization is performed between dequeue() and cancel(), so
+ * either external synchronization is needed, or these functions must be
+ * called from the same thread.
+ *
+ * The window holds a reference to the buffer between dequeueBuffer and
+ * either queueBuffer or cancelBuffer, so clients only need their own
+ * reference if they might use the buffer after queueing or canceling it.
+ * Holding a reference to a buffer after queueing or canceling it is only
+ * allowed if a specific buffer count has been set.
+ *
+ * The fenceFd argument specifies a libsync fence file descriptor for a
+ * fence that must signal before the buffer can be accessed. If the buffer
+ * can be accessed immediately then a value of -1 should be used.
+ *
+ * Note that if the client has not waited on the fence that was returned
+ * from dequeueBuffer, that same fence should be passed to cancelBuffer to
+ * ensure that future uses of the buffer are preceded by a wait on that
+ * fence. The caller must not use the file descriptor after it is passed
+ * to cancelBuffer, and the ANativeWindow implementation is responsible for
+ * closing it.
+ *
+ * Returns 0 on success or -errno on error.
+ */
+ int (*cancelBuffer)(struct ANativeWindow* window,
+ struct ANativeWindowBuffer* buffer, int fenceFd);
+};
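+
+/*
+ * Illustrative sketch (not part of the original header): a minimal
+ * dequeue/render/queue cycle using the fence-aware hooks above. sync_wait()
+ * is from libsync (<sync/sync.h>) and is an assumption of this sketch; the
+ * actual rendering step is left as a stub.
+ */
+static inline int example_render_one_frame(struct ANativeWindow* window)
+{
+    struct ANativeWindowBuffer* buffer = 0;
+    int fenceFd = -1;
+    int err = window->dequeueBuffer(window, &buffer, &fenceFd);
+    if (err != 0)
+        return err;
+    if (fenceFd >= 0) {
+        /* wait until it is safe to write to the buffer, then close the fd */
+        sync_wait(fenceFd, -1 /* no timeout */);
+        close(fenceFd);
+    }
+    /* ... fill the buffer (e.g. via gralloc lock/unlock) here ... */
+    /* pass -1: CPU writes are done, no fence for the consumer to wait on */
+    return window->queueBuffer(window, buffer, -1);
+}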
+
+ /* Backwards compatibility: use ANativeWindow (struct ANativeWindow in C).
+ * android_native_window_t is deprecated.
+ */
+typedef struct ANativeWindow ANativeWindow;
+typedef struct ANativeWindow android_native_window_t __deprecated;
+
+/*
+ * native_window_set_usage(..., usage)
+ * Sets the intended usage flags for the next buffers
+ * acquired with (*lockBuffer)() and on.
+ * By default (if this function is never called), a usage of
+ * GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_TEXTURE
+ * is assumed.
+ * Calling this function will usually cause following buffers to be
+ * reallocated.
+ */
+
+static inline int native_window_set_usage(
+ struct ANativeWindow* window, int usage)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_USAGE, usage);
+}
+
+/* deprecated. Always returns 0. Don't call. */
+static inline int native_window_connect(
+ struct ANativeWindow* window __UNUSED, int api __UNUSED) __deprecated;
+
+static inline int native_window_connect(
+ struct ANativeWindow* window __UNUSED, int api __UNUSED) {
+ return 0;
+}
+
+/* deprecated. Always returns 0. Don't call. */
+static inline int native_window_disconnect(
+ struct ANativeWindow* window __UNUSED, int api __UNUSED) __deprecated;
+
+static inline int native_window_disconnect(
+ struct ANativeWindow* window __UNUSED, int api __UNUSED) {
+ return 0;
+}
+
+/*
+ * native_window_set_crop(..., crop)
+ * Sets which region of the next queued buffers needs to be considered.
+ * Depending on the scaling mode, a buffer's crop region is scaled and/or
+ * cropped to match the surface's size. This function sets the crop in
+ * pre-transformed buffer pixel coordinates.
+ *
+ * The specified crop region applies to all buffers queued after it is called.
+ *
+ * If 'crop' is NULL, subsequently queued buffers won't be cropped.
+ *
+ * An error is returned if, for instance, the crop region is invalid, out of
+ * the buffer's bounds, or if the window is invalid.
+ */
+static inline int native_window_set_crop(
+ struct ANativeWindow* window,
+ android_native_rect_t const * crop)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_CROP, crop);
+}
+
+/*
+ * native_window_set_post_transform_crop(..., crop)
+ * Sets which region of the next queued buffers needs to be considered.
+ * Depending on the scaling mode, a buffer's crop region is scaled and/or
+ * cropped to match the surface's size. This function sets the crop in
+ * post-transformed pixel coordinates.
+ *
+ * The specified crop region applies to all buffers queued after it is called.
+ *
+ * If 'crop' is NULL, subsequently queued buffers won't be cropped.
+ *
+ * An error is returned if, for instance, the crop region is invalid, out of
+ * the buffer's bounds, or if the window is invalid.
+ */
+static inline int native_window_set_post_transform_crop(
+ struct ANativeWindow* window,
+ android_native_rect_t const * crop)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_POST_TRANSFORM_CROP, crop);
+}
+
+/*
+ * native_window_set_active_rect(..., active_rect)
+ *
+ * This function is deprecated and will be removed soon. For now it simply
+ * sets the post-transform crop for compatibility while multi-project commits
+ * get checked.
+ */
+static inline int native_window_set_active_rect(
+ struct ANativeWindow* window,
+ android_native_rect_t const * active_rect) __deprecated;
+
+static inline int native_window_set_active_rect(
+ struct ANativeWindow* window,
+ android_native_rect_t const * active_rect)
+{
+ return native_window_set_post_transform_crop(window, active_rect);
+}
+
+/*
+ * native_window_set_buffer_count(..., count)
+ * Sets the number of buffers associated with this native window.
+ */
+static inline int native_window_set_buffer_count(
+ struct ANativeWindow* window,
+ size_t bufferCount)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFER_COUNT, bufferCount);
+}
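+
+/*
+ * Illustrative sketch (not part of the original header): the
+ * NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS protocol described earlier. A client
+ * that needs wantDequeued buffers dequeued simultaneously in steady state
+ * should request wantDequeued + minUndequeued buffers in total.
+ */
+static inline int example_reserve_buffers(struct ANativeWindow* window,
+                                          size_t wantDequeued)
+{
+    int minUndequeued = 0;
+    int err = window->query(window, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+                            &minUndequeued);
+    if (err != 0)
+        return err;
+    return native_window_set_buffer_count(window,
+                                          wantDequeued + (size_t)minUndequeued);
+}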
+
+/*
+ * native_window_set_buffers_geometry(..., int w, int h, int format)
+ * All buffers dequeued after this call will have the dimensions and format
+ * specified. A successful call to this function has the same effect as calling
+ * native_window_set_buffers_size and native_window_set_buffers_format.
+ *
+ * XXX: This function is deprecated. The native_window_set_buffers_dimensions
+ * and native_window_set_buffers_format functions should be used instead.
+ */
+static inline int native_window_set_buffers_geometry(
+ struct ANativeWindow* window,
+ int w, int h, int format) __deprecated;
+
+static inline int native_window_set_buffers_geometry(
+ struct ANativeWindow* window,
+ int w, int h, int format)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_GEOMETRY,
+ w, h, format);
+}
+
+/*
+ * native_window_set_buffers_dimensions(..., int w, int h)
+ * All buffers dequeued after this call will have the dimensions specified.
+ * In particular, all buffers will have a fixed-size, independent from the
+ * native-window size. They will be scaled according to the scaling mode
+ * (see native_window_set_scaling_mode) upon window composition.
+ *
+ * If w and h are 0, the normal behavior is restored. That is, dequeued buffers
+ * following this call will be sized to match the window's size.
+ *
+ * Calling this function will reset the window crop to a NULL value, which
+ * disables cropping of the buffers.
+ */
+static inline int native_window_set_buffers_dimensions(
+ struct ANativeWindow* window,
+ int w, int h)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS,
+ w, h);
+}
+
+/*
+ * native_window_set_buffers_user_dimensions(..., int w, int h)
+ *
+ * Sets the user buffer size for the window, which overrides the
+ * window's size. All buffers dequeued after this call will have the
+ * dimensions specified unless overridden by
+ * native_window_set_buffers_dimensions. All buffers will have a
+ * fixed-size, independent from the native-window size. They will be
+ * scaled according to the scaling mode (see
+ * native_window_set_scaling_mode) upon window composition.
+ *
+ * If w and h are 0, the normal behavior is restored. That is, the
+ * default buffer size will match the window's size.
+ *
+ * Calling this function will reset the window crop to a NULL value, which
+ * disables cropping of the buffers.
+ */
+static inline int native_window_set_buffers_user_dimensions(
+ struct ANativeWindow* window,
+ int w, int h)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS,
+ w, h);
+}
+
+/*
+ * native_window_set_buffers_format(..., int format)
+ * All buffers dequeued after this call will have the format specified.
+ *
+ * If the specified format is 0, the default buffer format will be used.
+ */
+static inline int native_window_set_buffers_format(
+ struct ANativeWindow* window,
+ int format)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_FORMAT, format);
+}
+
+/*
+ * native_window_set_buffers_transform(..., int transform)
+ * All buffers queued after this call will be displayed transformed according
+ * to the transform parameter specified.
+ */
+static inline int native_window_set_buffers_transform(
+ struct ANativeWindow* window,
+ int transform)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_TRANSFORM,
+ transform);
+}
+
+/*
+ * native_window_set_buffers_timestamp(..., int64_t timestamp)
+ * All buffers queued after this call will be associated with the timestamp
+ * parameter specified. If the timestamp is set to NATIVE_WINDOW_TIMESTAMP_AUTO
+ * (the default), timestamps will be generated automatically when queueBuffer is
+ * called. The timestamp is measured in nanoseconds, and is normally monotonically
+ * increasing. The timestamp should be unaffected by time-of-day adjustments,
+ * and for a camera should be strictly monotonic but for a media player may be
+ * reset when the position is set.
+ */
+static inline int native_window_set_buffers_timestamp(
+ struct ANativeWindow* window,
+ int64_t timestamp)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP,
+ timestamp);
+}
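+
+/*
+ * Illustrative sketch (not part of the original header): supplying an explicit
+ * monotonic timestamp instead of NATIVE_WINDOW_TIMESTAMP_AUTO. Assumes
+ * <time.h> for clock_gettime().
+ */
+static inline int example_set_current_timestamp(struct ANativeWindow* window)
+{
+    struct timespec ts;
+    int64_t timestamp;
+    clock_gettime(CLOCK_MONOTONIC, &ts);
+    timestamp = (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
+    return native_window_set_buffers_timestamp(window, timestamp);
+}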
+
+/*
+ * native_window_set_scaling_mode(..., int mode)
+ * All buffers queued after this call will be associated with the scaling mode
+ * specified.
+ */
+static inline int native_window_set_scaling_mode(
+ struct ANativeWindow* window,
+ int mode)
+{
+ return window->perform(window, NATIVE_WINDOW_SET_SCALING_MODE,
+ mode);
+}
+
+/*
+ * native_window_api_connect(..., int api)
+ * connects an API to this window. Only one API can be connected at a time.
+ * Returns -EINVAL if for some reason the window cannot be connected, which
+ * can happen if it's connected to some other API.
+ */
+static inline int native_window_api_connect(
+ struct ANativeWindow* window, int api)
+{
+ return window->perform(window, NATIVE_WINDOW_API_CONNECT, api);
+}
+
+/*
+ * native_window_api_disconnect(..., int api)
+ * disconnect the API from this window.
+ * An error is returned if for instance the window wasn't connected in the
+ * first place.
+ */
+static inline int native_window_api_disconnect(
+ struct ANativeWindow* window, int api)
+{
+ return window->perform(window, NATIVE_WINDOW_API_DISCONNECT, api);
+}
+
+/*
+ * native_window_dequeue_buffer_and_wait(...)
+ * Dequeue a buffer and wait on the fence associated with that buffer. The
+ * buffer may safely be accessed immediately upon this function returning. An
+ * error is returned if either of the dequeue or the wait operations fail.
+ */
+static inline int native_window_dequeue_buffer_and_wait(ANativeWindow *anw,
+ struct ANativeWindowBuffer** anb) {
+ return anw->dequeueBuffer_DEPRECATED(anw, anb);
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SYSTEM_CORE_INCLUDE_ANDROID_WINDOW_H */