
android: fix build errors on android-6.0.1_r52
[android-x86/external-bluetooth-bluez.git] android/hal-audio.c
index f6bc998..f017c3b 100644
--- a/android/hal-audio.c
+++ b/android/hal-audio.c
 #include <hardware/audio.h>
 #include <hardware/hardware.h>
 
-#include <sbc/sbc.h>
-
 #include "audio-msg.h"
 #include "ipc-common.h"
 #include "hal-log.h"
 #include "hal-msg.h"
-#include "../profiles/audio/a2dp-codecs.h"
-#include "../src/shared/util.h"
+#include "hal-audio.h"
+#include "hal-utils.h"
+#include "hal.h"
 
 #define FIXED_A2DP_PLAYBACK_LATENCY_MS 25
 
 #define FIXED_BUFFER_SIZE (20 * 512)
 
-#define MAX_FRAMES_IN_PAYLOAD 15
+#define MAX_DELAY      100000 /* 100ms */
 
 static const uint8_t a2dp_src_uuid[] = {
                0x00, 0x00, 0x11, 0x0a, 0x00, 0x00, 0x10, 0x00,
@@ -55,94 +54,19 @@ static int audio_sk = -1;
 static pthread_t ipc_th = 0;
 static pthread_mutex_t sk_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-
-struct rtp_header {
-       unsigned cc:4;
-       unsigned x:1;
-       unsigned p:1;
-       unsigned v:2;
-
-       unsigned pt:7;
-       unsigned m:1;
-
-       uint16_t sequence_number;
-       uint32_t timestamp;
-       uint32_t ssrc;
-       uint32_t csrc[0];
-} __attribute__ ((packed));
-
-struct rtp_payload {
-       unsigned frame_count:4;
-       unsigned rfa0:1;
-       unsigned is_last_fragment:1;
-       unsigned is_first_fragment:1;
-       unsigned is_fragmented:1;
-} __attribute__ ((packed));
-
-#elif __BYTE_ORDER == __BIG_ENDIAN
-
-struct rtp_header {
-       unsigned v:2;
-       unsigned p:1;
-       unsigned x:1;
-       unsigned cc:4;
-
-       unsigned m:1;
-       unsigned pt:7;
-
-       uint16_t sequence_number;
-       uint32_t timestamp;
-       uint32_t ssrc;
-       uint32_t csrc[0];
-} __attribute__ ((packed));
-
-struct rtp_payload {
-       unsigned is_fragmented:1;
-       unsigned is_first_fragment:1;
-       unsigned is_last_fragment:1;
-       unsigned rfa0:1;
-       unsigned frame_count:4;
-} __attribute__ ((packed));
-
-#else
-#error "Unknown byte order"
-#endif
-
-struct media_packet {
-       struct rtp_header hdr;
-       struct rtp_payload payload;
-       uint8_t data[0];
-};
-
-struct audio_input_config {
-       uint32_t rate;
-       uint32_t channels;
-       audio_format_t format;
-};
-
-struct sbc_data {
-       a2dp_sbc_t sbc;
-
-       sbc_t enc;
-
-       size_t in_frame_len;
-       size_t in_buf_size;
-
-       size_t out_buf_size;
-       uint8_t *out_buf;
-
-       unsigned frame_duration;
-       unsigned frames_per_packet;
-
-       struct timespec start;
-       unsigned frames_sent;
-       uint32_t timestamp;
+static void timespec_add(struct timespec *base, uint64_t time_us,
+                                                       struct timespec *res)
+{
+       res->tv_sec = base->tv_sec + time_us / 1000000;
+       res->tv_nsec = base->tv_nsec + (time_us % 1000000) * 1000;
 
-       uint16_t seq;
-};
+       if (res->tv_nsec >= 1000000000) {
+               res->tv_sec++;
+               res->tv_nsec -= 1000000000;
+       }
+}
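
For example, feeding the helper above base = { 5 s, 999000000 ns } and time_us = 2500 works out as follows:

        /* base = { .tv_sec = 5, .tv_nsec = 999000000 }, time_us = 2500
         * -> res.tv_nsec = 999000000 + 2500 * 1000 = 1001500000 (>= 1e9)
         * -> after the carry: res = { .tv_sec = 6, .tv_nsec = 1500000 }
         */
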
 
-static inline void timespec_diff(struct timespec *a, struct timespec *b,
+static void timespec_diff(struct timespec *a, struct timespec *b,
                                                        struct timespec *res)
 {
        res->tv_sec = a->tv_sec - b->tv_sec;
@@ -154,48 +78,31 @@ static inline void timespec_diff(struct timespec *a, struct timespec *b,
        }
 }
 
-static int sbc_get_presets(struct audio_preset *preset, size_t *len);
-static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
-                                                       void **codec_data);
-static int sbc_cleanup(void *codec_data);
-static int sbc_get_config(void *codec_data, struct audio_input_config *config);
-static size_t sbc_get_buffer_size(void *codec_data);
-static size_t sbc_get_mediapacket_duration(void *codec_data);
-static void sbc_resume(void *codec_data);
-static ssize_t sbc_write_data(void *codec_data, const void *buffer,
-                                                       size_t bytes, int fd);
-
-struct audio_codec {
-       uint8_t type;
-
-       int (*get_presets) (struct audio_preset *preset, size_t *len);
-
-       int (*init) (struct audio_preset *preset, uint16_t mtu,
-                               void **codec_data);
-       int (*cleanup) (void *codec_data);
-       int (*get_config) (void *codec_data,
-                                       struct audio_input_config *config);
-       size_t (*get_buffer_size) (void *codec_data);
-       size_t (*get_mediapacket_duration) (void *codec_data);
-       void (*resume) (void *codec_data);
-       ssize_t (*write_data) (void *codec_data, const void *buffer,
-                                                       size_t bytes, int fd);
-};
+static uint64_t timespec_diff_us(struct timespec *a, struct timespec *b)
+{
+       struct timespec res;
+
+       timespec_diff(a, b, &res);
 
-static const struct audio_codec audio_codecs[] = {
-       {
-               .type = A2DP_CODEC_SBC,
+       return res.tv_sec * 1000000ll + res.tv_nsec / 1000ll;
+}
 
-               .get_presets = sbc_get_presets,
+#if ANDROID_VERSION < PLATFORM_VER(6, 0, 0)
+/*
+ * Bionic does not declare a prototype for clock_nanosleep() in time.h even
+ * though it provides the implementation.
+ */
+extern int clock_nanosleep(clockid_t clock_id, int flags,
+                                       const struct timespec *request,
+                                       struct timespec *remain);
+#endif
 
-               .init = sbc_codec_init,
-               .cleanup = sbc_cleanup,
-               .get_config = sbc_get_config,
-               .get_buffer_size = sbc_get_buffer_size,
-               .get_mediapacket_duration = sbc_get_mediapacket_duration,
-               .resume = sbc_resume,
-               .write_data = sbc_write_data,
-       }
+static struct {
+       const audio_codec_get_t get_codec;
+       bool loaded;
+} audio_codecs[] = {
+               { .get_codec = codec_aptx, .loaded = false },
+               { .get_codec = codec_sbc, .loaded = false },
 };
 
 #define NUM_CODECS (sizeof(audio_codecs) / sizeof(audio_codecs[0]))
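
The codec plug-in interface itself is declared in hal-audio.h, which is outside this diff. Purely as an illustration of the shape this table expects, inferred from the calls made elsewhere in this file and with hypothetical field names and initializers, a getter such as codec_sbc() would return a pointer to a static descriptor along these lines:

        /*
         * hypothetical sketch only -- the real struct audio_codec,
         * audio_codec_get_t and the codec_sbc()/codec_aptx() prototypes
         * are declared in hal-audio.h
         */
        const struct audio_codec *codec_sbc(void)
        {
                static const struct audio_codec sbc = {
                        .use_rtp = true,
                        /* plus the .load/.unload, .init, .cleanup,
                         * .get_config, .encode_mediapacket and .update_qos
                         * callbacks invoked elsewhere in this file */
                };

                return &sbc;
        }
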
@@ -207,6 +114,15 @@ struct audio_endpoint {
        const struct audio_codec *codec;
        void *codec_data;
        int fd;
+
+       struct media_packet *mp;
+       size_t mp_data_len;
+
+       uint16_t seq;
+       uint32_t samples;
+       struct timespec start;
+
+       bool resync;
 };
 
 static struct audio_endpoint audio_endpoints[MAX_AUDIO_ENDPOINTS];
@@ -233,382 +149,6 @@ struct a2dp_audio_dev {
        struct a2dp_stream_out *out;
 };
 
-static const a2dp_sbc_t sbc_presets[] = {
-       {
-               .frequency = SBC_SAMPLING_FREQ_44100 | SBC_SAMPLING_FREQ_48000,
-               .channel_mode = SBC_CHANNEL_MODE_MONO |
-                               SBC_CHANNEL_MODE_DUAL_CHANNEL |
-                               SBC_CHANNEL_MODE_STEREO |
-                               SBC_CHANNEL_MODE_JOINT_STEREO,
-               .subbands = SBC_SUBBANDS_4 | SBC_SUBBANDS_8,
-               .allocation_method = SBC_ALLOCATION_SNR |
-                                       SBC_ALLOCATION_LOUDNESS,
-               .block_length = SBC_BLOCK_LENGTH_4 | SBC_BLOCK_LENGTH_8 |
-                               SBC_BLOCK_LENGTH_12 | SBC_BLOCK_LENGTH_16,
-               .min_bitpool = MIN_BITPOOL,
-               .max_bitpool = MAX_BITPOOL
-       },
-       {
-               .frequency = SBC_SAMPLING_FREQ_44100,
-               .channel_mode = SBC_CHANNEL_MODE_JOINT_STEREO,
-               .subbands = SBC_SUBBANDS_8,
-               .allocation_method = SBC_ALLOCATION_LOUDNESS,
-               .block_length = SBC_BLOCK_LENGTH_16,
-               .min_bitpool = MIN_BITPOOL,
-               .max_bitpool = MAX_BITPOOL
-       },
-       {
-               .frequency = SBC_SAMPLING_FREQ_48000,
-               .channel_mode = SBC_CHANNEL_MODE_JOINT_STEREO,
-               .subbands = SBC_SUBBANDS_8,
-               .allocation_method = SBC_ALLOCATION_LOUDNESS,
-               .block_length = SBC_BLOCK_LENGTH_16,
-               .min_bitpool = MIN_BITPOOL,
-               .max_bitpool = MAX_BITPOOL
-       },
-};
-
-static int sbc_get_presets(struct audio_preset *preset, size_t *len)
-{
-       int i;
-       int count;
-       size_t new_len = 0;
-       uint8_t *ptr = (uint8_t *) preset;
-       size_t preset_size = sizeof(*preset) + sizeof(a2dp_sbc_t);
-
-       count = sizeof(sbc_presets) / sizeof(sbc_presets[0]);
-
-       for (i = 0; i < count; i++) {
-               preset = (struct audio_preset *) ptr;
-
-               if (new_len + preset_size > *len)
-                       break;
-
-               preset->len = sizeof(a2dp_sbc_t);
-               memcpy(preset->data, &sbc_presets[i], preset->len);
-
-               new_len += preset_size;
-               ptr += preset_size;
-       }
-
-       *len = new_len;
-
-       return i;
-}
-
-static int sbc_freq2int(uint8_t freq)
-{
-       switch (freq) {
-       case SBC_SAMPLING_FREQ_16000:
-               return 16000;
-       case SBC_SAMPLING_FREQ_32000:
-               return 32000;
-       case SBC_SAMPLING_FREQ_44100:
-               return 44100;
-       case SBC_SAMPLING_FREQ_48000:
-               return 48000;
-       default:
-               return 0;
-       }
-}
-
-static const char *sbc_mode2str(uint8_t mode)
-{
-       switch (mode) {
-       case SBC_CHANNEL_MODE_MONO:
-               return "Mono";
-       case SBC_CHANNEL_MODE_DUAL_CHANNEL:
-               return "DualChannel";
-       case SBC_CHANNEL_MODE_STEREO:
-               return "Stereo";
-       case SBC_CHANNEL_MODE_JOINT_STEREO:
-               return "JointStereo";
-       default:
-               return "(unknown)";
-       }
-}
-
-static int sbc_blocks2int(uint8_t blocks)
-{
-       switch (blocks) {
-       case SBC_BLOCK_LENGTH_4:
-               return 4;
-       case SBC_BLOCK_LENGTH_8:
-               return 8;
-       case SBC_BLOCK_LENGTH_12:
-               return 12;
-       case SBC_BLOCK_LENGTH_16:
-               return 16;
-       default:
-               return 0;
-       }
-}
-
-static int sbc_subbands2int(uint8_t subbands)
-{
-       switch (subbands) {
-       case SBC_SUBBANDS_4:
-               return 4;
-       case SBC_SUBBANDS_8:
-               return 8;
-       default:
-               return 0;
-       }
-}
-
-static const char *sbc_allocation2str(uint8_t allocation)
-{
-       switch (allocation) {
-       case SBC_ALLOCATION_SNR:
-               return "SNR";
-       case SBC_ALLOCATION_LOUDNESS:
-               return "Loudness";
-       default:
-               return "(unknown)";
-       }
-}
-
-static void sbc_init_encoder(struct sbc_data *sbc_data)
-{
-       a2dp_sbc_t *in = &sbc_data->sbc;
-       sbc_t *out = &sbc_data->enc;
-
-       sbc_init_a2dp(out, 0L, in, sizeof(*in));
-
-       out->endian = SBC_LE;
-       out->bitpool = in->max_bitpool;
-
-       DBG("frequency=%d channel_mode=%s block_length=%d subbands=%d "
-                       "allocation=%s bitpool=%d-%d",
-                       sbc_freq2int(in->frequency),
-                       sbc_mode2str(in->channel_mode),
-                       sbc_blocks2int(in->block_length),
-                       sbc_subbands2int(in->subbands),
-                       sbc_allocation2str(in->allocation_method),
-                       in->min_bitpool, in->max_bitpool);
-}
-
-static int sbc_codec_init(struct audio_preset *preset, uint16_t mtu,
-                                                       void **codec_data)
-{
-       struct sbc_data *sbc_data;
-       size_t hdr_len = sizeof(struct media_packet);
-       size_t in_frame_len;
-       size_t out_frame_len;
-       size_t num_frames;
-
-       if (preset->len != sizeof(a2dp_sbc_t)) {
-               error("SBC: preset size mismatch");
-               return AUDIO_STATUS_FAILED;
-       }
-
-       sbc_data = calloc(sizeof(struct sbc_data), 1);
-       if (!sbc_data)
-               return AUDIO_STATUS_FAILED;
-
-       memcpy(&sbc_data->sbc, preset->data, preset->len);
-
-       sbc_init_encoder(sbc_data);
-
-       in_frame_len = sbc_get_codesize(&sbc_data->enc);
-       out_frame_len = sbc_get_frame_length(&sbc_data->enc);
-       num_frames = (mtu - hdr_len) / out_frame_len;
-
-       sbc_data->in_frame_len = in_frame_len;
-       sbc_data->in_buf_size = num_frames * in_frame_len;
-
-       sbc_data->out_buf_size = hdr_len + num_frames * out_frame_len;
-       sbc_data->out_buf = calloc(1, sbc_data->out_buf_size);
-
-       sbc_data->frame_duration = sbc_get_frame_duration(&sbc_data->enc);
-       sbc_data->frames_per_packet = num_frames;
-
-       DBG("mtu=%u in_frame_len=%zu out_frame_len=%zu frames_per_packet=%zu",
-                       mtu, in_frame_len, out_frame_len, num_frames);
-
-       *codec_data = sbc_data;
-
-       return AUDIO_STATUS_SUCCESS;
-}
-
-static int sbc_cleanup(void *codec_data)
-{
-       struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
-
-       sbc_finish(&sbc_data->enc);
-       free(sbc_data->out_buf);
-       free(codec_data);
-
-       return AUDIO_STATUS_SUCCESS;
-}
-
-static int sbc_get_config(void *codec_data, struct audio_input_config *config)
-{
-       struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
-
-       switch (sbc_data->sbc.frequency) {
-       case SBC_SAMPLING_FREQ_16000:
-               config->rate = 16000;
-               break;
-       case SBC_SAMPLING_FREQ_32000:
-               config->rate = 32000;
-               break;
-       case SBC_SAMPLING_FREQ_44100:
-               config->rate = 44100;
-               break;
-       case SBC_SAMPLING_FREQ_48000:
-               config->rate = 48000;
-               break;
-       default:
-               return AUDIO_STATUS_FAILED;
-       }
-       config->channels = sbc_data->sbc.channel_mode == SBC_CHANNEL_MODE_MONO ?
-                               AUDIO_CHANNEL_OUT_MONO :
-                               AUDIO_CHANNEL_OUT_STEREO;
-       config->format = AUDIO_FORMAT_PCM_16_BIT;
-
-       return AUDIO_STATUS_SUCCESS;
-}
-
-static size_t sbc_get_buffer_size(void *codec_data)
-{
-       struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
-
-       return sbc_data->in_buf_size;
-}
-
-static size_t sbc_get_mediapacket_duration(void *codec_data)
-{
-       struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
-
-       return sbc_data->frame_duration * sbc_data->frames_per_packet;
-}
-
-static void sbc_resume(void *codec_data)
-{
-       struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
-
-       DBG("");
-
-       clock_gettime(CLOCK_MONOTONIC, &sbc_data->start);
-
-       sbc_data->frames_sent = 0;
-       sbc_data->timestamp = 0;
-}
-
-static int write_media_packet(int fd, struct sbc_data *sbc_data,
-                               struct media_packet *mp, size_t data_len)
-{
-       struct timespec cur;
-       struct timespec diff;
-       unsigned expected_frames;
-       int ret;
-
-       while (true) {
-               ret = write(fd, mp, sizeof(*mp) + data_len);
-               if (ret >= 0)
-                       break;
-
-               if (errno != EINTR)
-                       return -errno;
-       }
-
-       sbc_data->frames_sent += mp->payload.frame_count;
-
-       clock_gettime(CLOCK_MONOTONIC, &cur);
-       timespec_diff(&cur, &sbc_data->start, &diff);
-       expected_frames = (diff.tv_sec * 1000000 + diff.tv_nsec / 1000) /
-                                               sbc_data->frame_duration;
-
-       /* AudioFlinger does not seem to provide any *working*
-        * API to provide data in some interval and will just
-        * send another buffer as soon as we process current
-        * one. To prevent overflowing L2CAP socket, we need to
-        * introduce some artificial delay here base on how many
-        * audio frames were sent so far, i.e. if we're not
-        * lagging behind audio stream, we can sleep for
-        * duration of single media packet.
-        */
-       if (sbc_data->frames_sent >= expected_frames)
-               usleep(sbc_data->frame_duration *
-                               mp->payload.frame_count);
-
-       return ret;
-}
-
-static ssize_t sbc_write_data(void *codec_data, const void *buffer,
-                                                       size_t bytes, int fd)
-{
-       struct sbc_data *sbc_data = (struct sbc_data *) codec_data;
-       size_t consumed = 0;
-       size_t encoded = 0;
-       struct media_packet *mp = (struct media_packet *) sbc_data->out_buf;
-       size_t free_space = sbc_data->out_buf_size - sizeof(*mp);
-       int ret;
-       ssize_t bytes_read;
-
-       mp->hdr.v = 2;
-       mp->hdr.pt = 1;
-       mp->hdr.ssrc = htonl(1);
-       mp->hdr.timestamp = htonl(sbc_data->timestamp);
-       mp->payload.frame_count = 0;
-
-       while (bytes - consumed >= sbc_data->in_frame_len) {
-               ssize_t written = 0;
-
-               bytes_read = sbc_encode(&sbc_data->enc, buffer + consumed,
-                                       sbc_data->in_frame_len,
-                                       mp->data + encoded, free_space,
-                                       &written);
-
-               if (bytes_read < 0) {
-                       error("SBC: failed to encode block (%zd)", bytes_read);
-                       break;
-               }
-
-               mp->payload.frame_count++;
-
-               consumed += bytes_read;
-               encoded += written;
-               free_space -= written;
-
-               /* AudioFlinger provides PCM 16bit stereo only, thus sample size
-                * is always 4 bytes
-                */
-               sbc_data->timestamp += (bytes_read / 4);
-
-               /* write data if we either filled media packed or encoded all
-                * input data
-                */
-               if (mp->payload.frame_count == sbc_data->frames_per_packet ||
-                               bytes == consumed ||
-                               mp->payload.frame_count ==
-                                                       MAX_FRAMES_IN_PAYLOAD) {
-                       mp->hdr.sequence_number = htons(sbc_data->seq++);
-
-                       ret = write_media_packet(fd, sbc_data, mp, encoded);
-                       if (ret < 0)
-                               return ret;
-
-                       encoded = 0;
-                       free_space = sbc_data->out_buf_size - sizeof(*mp);
-                       mp->hdr.timestamp = htonl(sbc_data->timestamp);
-                       mp->payload.frame_count = 0;
-               }
-       }
-
-       if (consumed != bytes) {
-               /* we should encode all input data
-                * if we did not, something went wrong but we can't really
-                * handle this so this is just sanity check
-                */
-               error("SBC: failed to encode complete input buffer");
-       }
-
-       /* we always assume that all data was processed and sent */
-       return bytes;
-}
-
 static int audio_ipc_cmd(uint8_t service_id, uint8_t opcode, uint16_t len,
                        void *param, size_t *rsp_len, void *rsp, int *fd)
 {
@@ -747,8 +287,7 @@ static int audio_ipc_cmd(uint8_t service_id, uint8_t opcode, uint16_t len,
                        goto failed;
        }
 
-       if (rsp_len)
-               *rsp_len = cmd.len;
+       *rsp_len = cmd.len;
 
        return AUDIO_STATUS_SUCCESS;
 
@@ -802,7 +341,7 @@ static int ipc_close_cmd(uint8_t endpoint_id)
        return result;
 }
 
-static int ipc_open_stream_cmd(uint8_t endpoint_id, uint16_t *mtu, int *fd,
+static int ipc_open_stream_cmd(uint8_t *endpoint_id, uint16_t *mtu, int *fd,
                                                struct audio_preset **caps)
 {
        char buf[BLUEZ_AUDIO_MTU];
@@ -817,13 +356,14 @@ static int ipc_open_stream_cmd(uint8_t endpoint_id, uint16_t *mtu, int *fd,
        if (!caps)
                return AUDIO_STATUS_FAILED;
 
-       cmd.id = endpoint_id;
+       cmd.id = *endpoint_id;
 
        result = audio_ipc_cmd(AUDIO_SERVICE_ID, AUDIO_OP_OPEN_STREAM,
                                sizeof(cmd), &cmd, &rsp_len, rsp, fd);
        if (result == AUDIO_STATUS_SUCCESS) {
                size_t buf_len = sizeof(struct audio_preset) +
                                        rsp->preset[0].len;
+               *endpoint_id = rsp->id;
                *mtu = rsp->mtu;
                *caps = malloc(buf_len);
                memcpy(*caps, &rsp->preset, buf_len);
@@ -879,25 +419,53 @@ static int ipc_suspend_stream_cmd(uint8_t endpoint_id)
        return result;
 }
 
+struct register_state {
+       struct audio_endpoint *ep;
+       bool error;
+};
+
+static void register_endpoint(const struct audio_codec *codec,
+                                               struct register_state *state)
+{
+       struct audio_endpoint *ep = state->ep;
+
+       /* don't even try to register more endpoints if one failed */
+       if (state->error)
+               return;
+
+       ep->id = ipc_open_cmd(codec);
+
+       if (!ep->id) {
+               state->error = true;
+               error("Failed to register endpoint");
+               return;
+       }
+
+       ep->codec = codec;
+       ep->codec_data = NULL;
+       ep->fd = -1;
+
+       state->ep++;
+}
+
 static int register_endpoints(void)
 {
-       struct audio_endpoint *ep = &audio_endpoints[0];
-       size_t i;
+       struct register_state state;
+       unsigned int i;
 
-       for (i = 0; i < NUM_CODECS; i++, ep++) {
-               const struct audio_codec *codec = &audio_codecs[i];
+       state.ep = &audio_endpoints[0];
+       state.error = false;
 
-               ep->id = ipc_open_cmd(codec);
+       for (i = 0; i < NUM_CODECS; i++) {
+               const struct audio_codec *codec = audio_codecs[i].get_codec();
 
-               if (!ep->id)
-                       return AUDIO_STATUS_FAILED;
+               if (!audio_codecs[i].loaded)
+                       continue;
 
-               ep->codec = codec;
-               ep->codec_data = NULL;
-               ep->fd = -1;
+               register_endpoint(codec, &state);
        }
 
-       return AUDIO_STATUS_SUCCESS;
+       return state.error ? AUDIO_STATUS_FAILED : AUDIO_STATUS_SUCCESS;
 }
 
 static void unregister_endpoints(void)
@@ -914,47 +482,64 @@ static void unregister_endpoints(void)
        }
 }
 
-static int set_blocking(int fd)
-{
-       int flags;
-
-       flags = fcntl(fd, F_GETFL, 0);
-       if (flags < 0) {
-               int err = -errno;
-               error("fcntl(F_GETFL): %s (%d)", strerror(-err), -err);
-               return err;
-       }
-
-       if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) < 0) {
-               int err = -errno;
-               error("fcntl(F_SETFL): %s (%d)", strerror(-err), -err);
-               return err;
-       }
-
-       return 0;
-}
-
-static bool open_endpoint(struct audio_endpoint *ep,
+static bool open_endpoint(struct audio_endpoint **epp,
                                                struct audio_input_config *cfg)
 {
        struct audio_preset *preset;
+       struct audio_endpoint *ep = *epp;
        const struct audio_codec *codec;
        uint16_t mtu;
+       uint16_t payload_len;
        int fd;
+       size_t i;
+       uint8_t ep_id = 0;
 
-       if (ipc_open_stream_cmd(ep->id, &mtu, &fd, &preset) !=
+       if (ep)
+               ep_id = ep->id;
+
+       if (ipc_open_stream_cmd(&ep_id, &mtu, &fd, &preset) !=
                                                        AUDIO_STATUS_SUCCESS)
                return false;
 
-       if (set_blocking(fd) < 0)
+       DBG("ep_id=%d mtu=%u", ep_id, mtu);
+
+       for (i = 0; i < MAX_AUDIO_ENDPOINTS; i++)
+               if (audio_endpoints[i].id == ep_id) {
+                       ep = &audio_endpoints[i];
+                       break;
+               }
+
+       if (!ep) {
+               error("Could not find opened endpoint");
                goto failed;
+       }
+
+       *epp = ep;
+
+       payload_len = mtu;
+       if (ep->codec->use_rtp)
+               payload_len -= sizeof(struct rtp_header);
 
        ep->fd = fd;
 
        codec = ep->codec;
-       codec->init(preset, mtu, &ep->codec_data);
+       codec->init(preset, payload_len, &ep->codec_data);
        codec->get_config(ep->codec_data, cfg);
 
+       ep->mp = calloc(mtu, 1);
+       if (!ep->mp)
+               goto failed;
+
+       if (ep->codec->use_rtp) {
+               struct media_packet_rtp *mp_rtp =
+                                       (struct media_packet_rtp *) ep->mp;
+               mp_rtp->hdr.v = 2;
+               mp_rtp->hdr.pt = 0x60;
+               mp_rtp->hdr.ssrc = htonl(1);
+       }
+
+       ep->mp_data_len = payload_len;
+
        free(preset);
 
        return true;
@@ -974,29 +559,225 @@ static void close_endpoint(struct audio_endpoint *ep)
                ep->fd = -1;
        }
 
+       free(ep->mp);
+
        ep->codec->cleanup(ep->codec_data);
        ep->codec_data = NULL;
 }
 
+static bool resume_endpoint(struct audio_endpoint *ep)
+{
+       if (ipc_resume_stream_cmd(ep->id) != AUDIO_STATUS_SUCCESS)
+               return false;
+
+       ep->samples = 0;
+       ep->resync = false;
+
+       ep->codec->update_qos(ep->codec_data, QOS_POLICY_DEFAULT);
+
+       return true;
+}
+
 static void downmix_to_mono(struct a2dp_stream_out *out, const uint8_t *buffer,
                                                                size_t bytes)
 {
        const int16_t *input = (const void *) buffer;
        int16_t *output = (void *) out->downmix_buf;
-       size_t i;
+       size_t i, frames;
+
+       /* PCM 16bit stereo */
+       frames = bytes / (2 * sizeof(int16_t));
+
+       for (i = 0; i < frames; i++) {
+               int16_t l = get_le16(&input[i * 2]);
+               int16_t r = get_le16(&input[i * 2 + 1]);
+
+               put_le16((l + r) / 2, &output[i]);
+       }
+}
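
A quick worked example of the downmix above, assuming a little-endian host and two 16-bit stereo frames (the get_le16()/put_le16() byte-order helpers are elided here):

        /* input, 8 bytes = 2 stereo frames: L0 = 1000, R0 = -200, L1 = 300, R1 = 300
         * frames = 8 / (2 * sizeof(int16_t)) = 2
         * output, 2 mono samples: (1000 + -200) / 2 = 400 and (300 + 300) / 2 = 300
         */
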
+
+static bool wait_for_endpoint(struct audio_endpoint *ep, bool *writable)
+{
+       int ret;
+
+       while (true) {
+               struct pollfd pollfd;
+
+               pollfd.fd = ep->fd;
+               pollfd.events = POLLOUT;
+               pollfd.revents = 0;
+
+               ret = poll(&pollfd, 1, 500);
+
+               if (ret >= 0) {
+                       *writable = !!(pollfd.revents & POLLOUT);
+                       break;
+               }
+
+               if (errno != EINTR) {
+                       ret = errno;
+                       error("poll failed (%d)", ret);
+                       return false;
+               }
+       }
+
+       return true;
+}
 
-       for (i = 0; i < bytes / 2; i++) {
-               int16_t l = le16_to_cpu(get_unaligned(&input[i * 2]));
-               int16_t r = le16_to_cpu(get_unaligned(&input[i * 2 + 1]));
+static bool write_to_endpoint(struct audio_endpoint *ep, size_t bytes)
+{
+       struct media_packet *mp = (struct media_packet *) ep->mp;
+       int ret;
+
+       while (true) {
+               ret = write(ep->fd, mp, bytes);
+
+               if (ret >= 0)
+                       break;
+
+               /*
+                * this should not happen, so issue a warning but do not
+                * fail; we can try to write the next packet
+                */
+               if (errno == EAGAIN) {
+                       ret = errno;
+                       warn("write failed (%d)", ret);
+                       break;
+               }
 
-               put_unaligned(cpu_to_le16((l + r) / 2), &output[i]);
+               if (errno != EINTR) {
+                       ret = errno;
+                       error("write failed (%d)", ret);
+                       return false;
+               }
        }
+
+       return true;
+}
+
+static bool write_data(struct a2dp_stream_out *out, const void *buffer,
+                                                               size_t bytes)
+{
+       struct audio_endpoint *ep = out->ep;
+       struct media_packet *mp = (struct media_packet *) ep->mp;
+       struct media_packet_rtp *mp_rtp = (struct media_packet_rtp *) ep->mp;
+       size_t free_space = ep->mp_data_len;
+       size_t consumed = 0;
+
+       while (consumed < bytes) {
+               size_t written = 0;
+               ssize_t read;
+               uint32_t samples;
+               int ret;
+               struct timespec current;
+               uint64_t audio_sent, audio_passed;
+               bool do_write = false;
+
+               /*
+                * prepare media packet in advance so we don't waste time after
+                * wakeup
+                */
+               if (ep->codec->use_rtp) {
+                       mp_rtp->hdr.sequence_number = htons(ep->seq++);
+                       mp_rtp->hdr.timestamp = htonl(ep->samples);
+               }
+               read = ep->codec->encode_mediapacket(ep->codec_data,
+                                               buffer + consumed,
+                                               bytes - consumed, mp,
+                                               free_space, &written);
+
+               /*
+                * not much we can do here, let's just ignore remaining
+                * data and continue
+                */
+               if (read <= 0)
+                       return true;
+
+               /* calculate where we are and where we should be */
+               clock_gettime(CLOCK_MONOTONIC, &current);
+               if (!ep->samples)
+                       memcpy(&ep->start, &current, sizeof(ep->start));
+               audio_sent = ep->samples * 1000000ll / out->cfg.rate;
+               audio_passed = timespec_diff_us(&current, &ep->start);
+
+               /*
+                * if we're ahead of the stream, wait for the next write point;
+                * if we're lagging by more than 100 ms, stop writing and just
+                * skip data until we're back in sync
+                */
+               if (audio_sent > audio_passed) {
+                       struct timespec anchor;
+
+                       ep->resync = false;
+
+                       timespec_add(&ep->start, audio_sent, &anchor);
+
+                       while (true) {
+                               ret = clock_nanosleep(CLOCK_MONOTONIC,
+                                                       TIMER_ABSTIME, &anchor,
+                                                       NULL);
+
+                               if (!ret)
+                                       break;
+
+                               if (ret != EINTR) {
+                                       error("clock_nanosleep failed (%d)",
+                                                                       ret);
+                                       return false;
+                               }
+                       }
+               } else if (!ep->resync) {
+                       uint64_t diff = audio_passed - audio_sent;
+
+                       if (diff > MAX_DELAY) {
+                               warn("lag is %jums, resyncing", diff / 1000);
+
+                               ep->codec->update_qos(ep->codec_data,
+                                                       QOS_POLICY_DECREASE);
+                               ep->resync = true;
+                       }
+               }
+
+               /* we send data only if the codec actually encoded something,
+                * i.e. some codecs buffer internally and produce output only
+                * once a full frame can be encoded; in resync mode we simply
+                * drop media packets
+                */
+               if (written > 0 && !ep->resync) {
+                       /* wait some time for the socket to become writable,
+                        * but just skip writing the data if the timeout occurs
+                        */
+                       if (!wait_for_endpoint(ep, &do_write))
+                               return false;
+
+                       if (do_write) {
+                               if (ep->codec->use_rtp)
+                                       written += sizeof(struct rtp_header);
+
+                               if (!write_to_endpoint(ep, written))
+                                       return false;
+                       }
+               }
+
+               /*
+                * AudioFlinger provides 16-bit PCM, so each frame is 2 bytes
+                * multiplied by the number of channels, which is simply the
+                * number of bits set in the channel mask.
+                */
+               samples = read / (2 * popcount(out->cfg.channels));
+               ep->samples += samples;
+               consumed += read;
+       }
+
+       return true;
 }
 
 static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                                                                size_t bytes)
 {
        struct a2dp_stream_out *out = (struct a2dp_stream_out *) stream;
+       const void *in_buf = buffer;
+       size_t in_len = bytes;
 
        /* just return in case we're closing */
        if (out->audio_state == AUDIO_A2DP_STATE_NONE)
@@ -1006,11 +787,9 @@ static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
        if (out->audio_state == AUDIO_A2DP_STATE_STANDBY) {
                DBG("stream in standby, auto-start");
 
-               if (ipc_resume_stream_cmd(out->ep->id) != AUDIO_STATUS_SUCCESS)
+               if (!resume_endpoint(out->ep))
                        return -1;
 
-               out->ep->codec->resume(out->ep->codec_data);
-
                out->audio_state = AUDIO_A2DP_STATE_STARTED;
        }
 
@@ -1024,7 +803,8 @@ static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                return -1;
        }
 
-       /* currently Android audioflinger is not able to provide mono stream on
+       /*
+        * currently Android audioflinger is not able to provide mono stream on
         * A2DP output so down mixing needs to be done in hal-audio plugin.
         *
         * for reference see
@@ -1039,14 +819,14 @@ static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
 
                downmix_to_mono(out, buffer, bytes);
 
-               return out->ep->codec->write_data(out->ep->codec_data,
-                                                       out->downmix_buf,
-                                                       bytes / 2,
-                                                       out->ep->fd) * 2;
+               in_buf = out->downmix_buf;
+               in_len = bytes / 2;
        }
 
-       return out->ep->codec->write_data(out->ep->codec_data, buffer,
-                                                       bytes, out->ep->fd);
+       if (!write_data(out, in_buf, in_len))
+               return -1;
+
+       return bytes;
 }
 
 static uint32_t out_get_sample_rate(const struct audio_stream *stream)
@@ -1076,7 +856,8 @@ static size_t out_get_buffer_size(const struct audio_stream *stream)
 {
        DBG("");
 
-       /* We should return proper buffer size calculated by codec (so each
+       /*
+        * We should return proper buffer size calculated by codec (so each
         * input buffer is encoded into single media packed) but this does not
         * work well with AudioFlinger and causes problems. For this reason we
         * use magic value here and out_write code takes care of splitting
@@ -1089,7 +870,8 @@ static uint32_t out_get_channels(const struct audio_stream *stream)
 {
        DBG("");
 
-       /* AudioFlinger can only provide stereo stream, so we return it here and
+       /*
+        * AudioFlinger can only provide stereo stream, so we return it here and
         * later we'll downmix this to mono in case codec requires it
         */
 
@@ -1144,6 +926,9 @@ static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
        DBG("%s", kvpairs);
 
        str = strdup(kvpairs);
+       if (!str)
+               return -ENOMEM;
+
        kvpair = strtok_r(str, ";", &saveptr);
 
        for (; kvpair && *kvpair; kvpair = strtok_r(NULL, ";", &saveptr)) {
@@ -1324,13 +1109,13 @@ static int in_remove_audio_effect(const struct audio_stream *stream,
        return -ENOSYS;
 }
 
-static int audio_open_output_stream(struct audio_hw_device *dev,
+static int audio_open_output_stream_real(struct audio_hw_device *dev,
                                        audio_io_handle_t handle,
                                        audio_devices_t devices,
                                        audio_output_flags_t flags,
                                        struct audio_config *config,
-                                       struct audio_stream_out **stream_out)
-
+                                       struct audio_stream_out **stream_out,
+                                       const char *address)
 {
        struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *) dev;
        struct a2dp_stream_out *out;
@@ -1358,10 +1143,10 @@ static int audio_open_output_stream(struct audio_hw_device *dev,
        out->stream.write = out_write;
        out->stream.get_render_position = out_get_render_position;
 
-       /* TODO: for now we always use endpoint 0 */
-       out->ep = &audio_endpoints[0];
+       /* We want to autoselect opened endpoint */
+       out->ep = NULL;
 
-       if (!open_endpoint(out->ep, &out->cfg))
+       if (!open_endpoint(&out->ep, &out->cfg))
                goto fail;
 
        DBG("rate=%d channels=%d format=%d", out->cfg.rate,
@@ -1387,6 +1172,31 @@ fail:
        return -EIO;
 }
 
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+static int audio_open_output_stream(struct audio_hw_device *dev,
+                                       audio_io_handle_t handle,
+                                       audio_devices_t devices,
+                                       audio_output_flags_t flags,
+                                       struct audio_config *config,
+                                       struct audio_stream_out **stream_out,
+                                       const char *address)
+{
+       return audio_open_output_stream_real(dev, handle, devices, flags,
+                                               config, stream_out, address);
+}
+#else
+static int audio_open_output_stream(struct audio_hw_device *dev,
+                                       audio_io_handle_t handle,
+                                       audio_devices_t devices,
+                                       audio_output_flags_t flags,
+                                       struct audio_config *config,
+                                       struct audio_stream_out **stream_out)
+{
+       return audio_open_output_stream_real(dev, handle, devices, flags,
+                                               config, stream_out, NULL);
+}
+#endif
+
 static void audio_close_output_stream(struct audio_hw_device *dev,
                                        struct audio_stream_out *stream)
 {
@@ -1468,11 +1278,14 @@ static size_t audio_get_input_buffer_size(const struct audio_hw_device *dev,
        return -ENOSYS;
 }
 
-static int audio_open_input_stream(struct audio_hw_device *dev,
+static int audio_open_input_stream_real(struct audio_hw_device *dev,
                                        audio_io_handle_t handle,
                                        audio_devices_t devices,
                                        struct audio_config *config,
-                                       struct audio_stream_in **stream_in)
+                                       struct audio_stream_in **stream_in,
+                                       audio_input_flags_t flags,
+                                       const char *address,
+                                       audio_source_t source)
 {
        struct audio_stream_in *in;
 
@@ -1503,6 +1316,32 @@ static int audio_open_input_stream(struct audio_hw_device *dev,
        return 0;
 }
 
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+static int audio_open_input_stream(struct audio_hw_device *dev,
+                                       audio_io_handle_t handle,
+                                       audio_devices_t devices,
+                                       struct audio_config *config,
+                                       struct audio_stream_in **stream_in,
+                                       audio_input_flags_t flags,
+                                       const char *address,
+                                       audio_source_t source)
+{
+       return audio_open_input_stream_real(dev, handle, devices, config,
+                                               stream_in, flags, address,
+                                               source);
+}
+#else
+static int audio_open_input_stream(struct audio_hw_device *dev,
+                                       audio_io_handle_t handle,
+                                       audio_devices_t devices,
+                                       struct audio_config *config,
+                                       struct audio_stream_in **stream_in)
+{
+       return audio_open_input_stream_real(dev, handle, devices, config,
+                                               stream_in, 0, NULL, 0);
+}
+#endif
+
 static void audio_close_input_stream(struct audio_hw_device *dev,
                                        struct audio_stream_in *stream_in)
 {
@@ -1516,14 +1355,72 @@ static int audio_dump(const audio_hw_device_t *device, int fd)
        return -ENOSYS;
 }
 
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+static int set_master_mute(struct audio_hw_device *dev, bool mute)
+{
+       DBG("");
+       return -ENOSYS;
+}
+
+static int get_master_mute(struct audio_hw_device *dev, bool *mute)
+{
+       DBG("");
+       return -ENOSYS;
+}
+
+static int create_audio_patch(struct audio_hw_device *dev,
+                                       unsigned int num_sources,
+                                       const struct audio_port_config *sources,
+                                       unsigned int num_sinks,
+                                       const struct audio_port_config *sinks,
+                                       audio_patch_handle_t *handle)
+{
+       DBG("");
+       return -ENOSYS;
+}
+
+static int release_audio_patch(struct audio_hw_device *dev,
+                                       audio_patch_handle_t handle)
+{
+       DBG("");
+       return -ENOSYS;
+}
+
+static int get_audio_port(struct audio_hw_device *dev, struct audio_port *port)
+{
+       DBG("");
+       return -ENOSYS;
+}
+
+static int set_audio_port_config(struct audio_hw_device *dev,
+                                       const struct audio_port_config *config)
+{
+       DBG("");
+       return -ENOSYS;
+}
+#endif
+
 static int audio_close(hw_device_t *device)
 {
        struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *)device;
+       unsigned int i;
 
        DBG("");
 
        unregister_endpoints();
 
+       for (i = 0; i < NUM_CODECS; i++) {
+               const struct audio_codec *codec = audio_codecs[i].get_codec();
+
+               if (!audio_codecs[i].loaded)
+                       continue;
+
+               if (codec->unload)
+                       codec->unload();
+
+               audio_codecs[i].loaded = false;
+       }
+
        shutdown(listen_sk, SHUT_RDWR);
        shutdown(audio_sk, SHUT_RDWR);
 
@@ -1588,14 +1485,12 @@ static void *ipc_handler(void *data)
                /* Check if socket is still alive. Empty while loop.*/
                while (poll(&pfd, 1, -1) < 0 && errno == EINTR);
 
-               if (pfd.revents & (POLLHUP | POLLERR | POLLNVAL)) {
-                       info("Audio HAL: Socket closed");
+               info("Audio HAL: Socket closed");
 
-                       pthread_mutex_lock(&sk_mutex);
-                       close(audio_sk);
-                       audio_sk = -1;
-                       pthread_mutex_unlock(&sk_mutex);
-               }
+               pthread_mutex_lock(&sk_mutex);
+               close(audio_sk);
+               audio_sk = -1;
+               pthread_mutex_unlock(&sk_mutex);
        }
 
        /* audio_sk is closed at this point, just cleanup endpoints states */
@@ -1663,6 +1558,7 @@ static int audio_open(const hw_module_t *module, const char *name,
                                                        hw_device_t **device)
 {
        struct a2dp_audio_dev *a2dp_dev;
+       size_t i;
        int err;
 
        DBG("");
@@ -1700,10 +1596,29 @@ static int audio_open(const hw_module_t *module, const char *name,
        a2dp_dev->dev.open_input_stream = audio_open_input_stream;
        a2dp_dev->dev.close_input_stream = audio_close_input_stream;
        a2dp_dev->dev.dump = audio_dump;
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+       a2dp_dev->dev.set_master_mute = set_master_mute;
+       a2dp_dev->dev.get_master_mute = get_master_mute;
+       a2dp_dev->dev.create_audio_patch = create_audio_patch;
+       a2dp_dev->dev.release_audio_patch = release_audio_patch;
+       a2dp_dev->dev.get_audio_port = get_audio_port;
+       a2dp_dev->dev.set_audio_port_config = set_audio_port_config;
+#endif
+
+       for (i = 0; i < NUM_CODECS; i++) {
+               const struct audio_codec *codec = audio_codecs[i].get_codec();
 
-       /* Note that &a2dp_dev->dev.common is the same pointer as a2dp_dev.
+               if (codec->load && !codec->load())
+                       continue;
+
+               audio_codecs[i].loaded = true;
+       }
+
+       /*
+        * Note that &a2dp_dev->dev.common is the same pointer as a2dp_dev.
         * This results from the structure of following structs:a2dp_audio_dev,
-        * audio_hw_device. We will rely on this later in the code.*/
+        * audio_hw_device. We will rely on this later in the code.
+        */
        *device = &a2dp_dev->dev.common;
 
        return 0;
@@ -1715,12 +1630,12 @@ static struct hw_module_methods_t hal_module_methods = {
 
 struct audio_module HAL_MODULE_INFO_SYM = {
        .common = {
-       .tag = HARDWARE_MODULE_TAG,
-       .version_major = 1,
-       .version_minor = 0,
-       .id = AUDIO_HARDWARE_MODULE_ID,
-       .name = "A2DP Bluez HW HAL",
-       .author = "Intel Corporation",
-       .methods = &hal_module_methods,
+               .tag = HARDWARE_MODULE_TAG,
+               .version_major = 1,
+               .version_minor = 0,
+               .id = AUDIO_HARDWARE_MODULE_ID,
+               .name = "A2DP Bluez HW HAL",
+               .author = "Intel Corporation",
+               .methods = &hal_module_methods,
        },
 };