#include "hal-log.h"
#include "hal-msg.h"
#include "hal-audio.h"
-#include "../src/shared/util.h"
+#include "hal-utils.h"
+#include "hal.h"
#define FIXED_A2DP_PLAYBACK_LATENCY_MS 25
return res.tv_sec * 1000000ll + res.tv_nsec / 1000ll;
}
-#if defined(ANDROID)
+#if ANDROID_VERSION < PLATFORM_VER(6, 0, 0)
/*
* Bionic does not have clock_nanosleep() prototype in time.h even though
* it provides its implementation.
struct timespec *remain);
#endif
-static const audio_codec_get_t audio_codecs[] = {
- codec_sbc,
+static struct {
+ const audio_codec_get_t get_codec;
+ bool loaded;
+} audio_codecs[] = {
+ { .get_codec = codec_aptx, .loaded = false },
+ { .get_codec = codec_sbc, .loaded = false },
};
#define NUM_CODECS (sizeof(audio_codecs) / sizeof(audio_codecs[0]))
goto failed;
}
- if (rsp_len)
- *rsp_len = cmd.len;
+ *rsp_len = cmd.len;
return AUDIO_STATUS_SUCCESS;
return result;
}
-static int ipc_open_stream_cmd(uint8_t endpoint_id, uint16_t *mtu, int *fd,
+static int ipc_open_stream_cmd(uint8_t *endpoint_id, uint16_t *mtu, int *fd,
struct audio_preset **caps)
{
char buf[BLUEZ_AUDIO_MTU];
if (!caps)
return AUDIO_STATUS_FAILED;
- cmd.id = endpoint_id;
+ cmd.id = *endpoint_id;
result = audio_ipc_cmd(AUDIO_SERVICE_ID, AUDIO_OP_OPEN_STREAM,
sizeof(cmd), &cmd, &rsp_len, rsp, fd);
if (result == AUDIO_STATUS_SUCCESS) {
size_t buf_len = sizeof(struct audio_preset) +
rsp->preset[0].len;
+ *endpoint_id = rsp->id;
*mtu = rsp->mtu;
*caps = malloc(buf_len);
memcpy(*caps, &rsp->preset, buf_len);
return result;
}
-static int register_endpoints(void)
+struct register_state {
+ struct audio_endpoint *ep;
+ bool error;
+};
+
+static void register_endpoint(const struct audio_codec *codec,
+ struct register_state *state)
{
- struct audio_endpoint *ep = &audio_endpoints[0];
- size_t i;
+ struct audio_endpoint *ep = state->ep;
+
+ /* don't even try to register more endpoints if one failed */
+ if (state->error)
+ return;
+
+ ep->id = ipc_open_cmd(codec);
- for (i = 0; i < NUM_CODECS; i++, ep++) {
- const struct audio_codec *codec = audio_codecs[i]();
+ if (!ep->id) {
+ state->error = true;
+ error("Failed to register endpoint");
+ return;
+ }
+
+ ep->codec = codec;
+ ep->codec_data = NULL;
+ ep->fd = -1;
+
+ state->ep++;
+}
- if (!codec)
- return AUDIO_STATUS_FAILED;
+static int register_endpoints(void)
+{
+ struct register_state state;
+ unsigned int i;
- ep->id = ipc_open_cmd(codec);
+ state.ep = &audio_endpoints[0];
+ state.error = false;
- if (!ep->id)
- return AUDIO_STATUS_FAILED;
+ for (i = 0; i < NUM_CODECS; i++) {
+ const struct audio_codec *codec = audio_codecs[i].get_codec();
- ep->codec = codec;
- ep->codec_data = NULL;
- ep->fd = -1;
+ if (!audio_codecs[i].loaded)
+ continue;
+
+ register_endpoint(codec, &state);
}
- return AUDIO_STATUS_SUCCESS;
+ return state.error ? AUDIO_STATUS_FAILED : AUDIO_STATUS_SUCCESS;
}
static void unregister_endpoints(void)
}
}
-static bool open_endpoint(struct audio_endpoint *ep,
+static bool open_endpoint(struct audio_endpoint **epp,
struct audio_input_config *cfg)
{
struct audio_preset *preset;
+ struct audio_endpoint *ep = *epp;
const struct audio_codec *codec;
uint16_t mtu;
uint16_t payload_len;
int fd;
+ size_t i;
+ uint8_t ep_id = 0;
- if (ipc_open_stream_cmd(ep->id, &mtu, &fd, &preset) !=
+ if (ep)
+ ep_id = ep->id;
+
+ if (ipc_open_stream_cmd(&ep_id, &mtu, &fd, &preset) !=
AUDIO_STATUS_SUCCESS)
return false;
- DBG("mtu=%u", mtu);
+ DBG("ep_id=%d mtu=%u", ep_id, mtu);
- payload_len = mtu - sizeof(*ep->mp);
+ for (i = 0; i < MAX_AUDIO_ENDPOINTS; i++)
+ if (audio_endpoints[i].id == ep_id) {
+ ep = &audio_endpoints[i];
+ break;
+ }
+
+ if (!ep) {
+		error("Could not find opened endpoint");
+ goto failed;
+ }
+
+ *epp = ep;
+
+ payload_len = mtu;
+ if (ep->codec->use_rtp)
+ payload_len -= sizeof(struct rtp_header);
ep->fd = fd;
ep->mp = calloc(mtu, 1);
if (!ep->mp)
goto failed;
- ep->mp->hdr.v = 2;
- ep->mp->hdr.pt = 0x60;
- ep->mp->hdr.ssrc = htonl(1);
+
+ if (ep->codec->use_rtp) {
+ struct media_packet_rtp *mp_rtp =
+ (struct media_packet_rtp *) ep->mp;
+ mp_rtp->hdr.v = 2;
+ mp_rtp->hdr.pt = 0x60;
+ mp_rtp->hdr.ssrc = htonl(1);
+ }
ep->mp_data_len = payload_len;
frames = bytes / (2 * sizeof(int16_t));
for (i = 0; i < frames; i++) {
- int16_t l = le16_to_cpu(get_unaligned(&input[i * 2]));
- int16_t r = le16_to_cpu(get_unaligned(&input[i * 2 + 1]));
+ int16_t l = get_le16(&input[i * 2]);
+ int16_t r = get_le16(&input[i * 2 + 1]);
- put_unaligned(cpu_to_le16((l + r) / 2), &output[i]);
+ put_le16((l + r) / 2, &output[i]);
}
}
int ret;
while (true) {
- ret = write(ep->fd, mp, sizeof(*mp) + bytes);
+ ret = write(ep->fd, mp, bytes);
if (ret >= 0)
break;
{
struct audio_endpoint *ep = out->ep;
struct media_packet *mp = (struct media_packet *) ep->mp;
+ struct media_packet_rtp *mp_rtp = (struct media_packet_rtp *) ep->mp;
size_t free_space = ep->mp_data_len;
size_t consumed = 0;
* prepare media packet in advance so we don't waste time after
* wakeup
*/
- mp->hdr.sequence_number = htons(ep->seq++);
- mp->hdr.timestamp = htonl(ep->samples);
+ if (ep->codec->use_rtp) {
+ mp_rtp->hdr.sequence_number = htons(ep->seq++);
+ mp_rtp->hdr.timestamp = htonl(ep->samples);
+ }
read = ep->codec->encode_mediapacket(ep->codec_data,
buffer + consumed,
bytes - consumed, mp,
}
}
- /* in resync mode we'll just drop mediapackets */
- if (!ep->resync) {
+	/* We send data only if the codec actually produced output; some
+	 * codecs buffer internally and emit data only once a full frame
+	 * can be encoded.
+	 * In resync mode we just drop media packets.
+	 */
+ if (written > 0 && !ep->resync) {
/* wait some time for socket to be ready for write,
* but we'll just skip writing data if timeout occurs
*/
if (!wait_for_endpoint(ep, &do_write))
return false;
- if (do_write)
+ if (do_write) {
+ if (ep->codec->use_rtp)
+ written += sizeof(struct rtp_header);
+
if (!write_to_endpoint(ep, written))
return false;
+ }
}
/*
return -ENOSYS;
}
-static int audio_open_output_stream(struct audio_hw_device *dev,
+static int audio_open_output_stream_real(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
- struct audio_stream_out **stream_out)
-
+ struct audio_stream_out **stream_out,
+ const char *address)
{
struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *) dev;
struct a2dp_stream_out *out;
out->stream.write = out_write;
out->stream.get_render_position = out_get_render_position;
- /* TODO: for now we always use endpoint 0 */
- out->ep = &audio_endpoints[0];
+	/* We want to autoselect the opened endpoint */
+ out->ep = NULL;
- if (!open_endpoint(out->ep, &out->cfg))
+ if (!open_endpoint(&out->ep, &out->cfg))
goto fail;
DBG("rate=%d channels=%d format=%d", out->cfg.rate,
return -EIO;
}
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+static int audio_open_output_stream(struct audio_hw_device *dev,
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ struct audio_stream_out **stream_out,
+ const char *address)
+{
+ return audio_open_output_stream_real(dev, handle, devices, flags,
+ config, stream_out, address);
+}
+#else
+static int audio_open_output_stream(struct audio_hw_device *dev,
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ audio_output_flags_t flags,
+ struct audio_config *config,
+ struct audio_stream_out **stream_out)
+{
+ return audio_open_output_stream_real(dev, handle, devices, flags,
+ config, stream_out, NULL);
+}
+#endif
+
static void audio_close_output_stream(struct audio_hw_device *dev,
struct audio_stream_out *stream)
{
return -ENOSYS;
}
-static int audio_open_input_stream(struct audio_hw_device *dev,
+static int audio_open_input_stream_real(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
struct audio_config *config,
- struct audio_stream_in **stream_in)
+ struct audio_stream_in **stream_in,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source)
{
struct audio_stream_in *in;
return 0;
}
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+static int audio_open_input_stream(struct audio_hw_device *dev,
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ struct audio_stream_in **stream_in,
+ audio_input_flags_t flags,
+ const char *address,
+ audio_source_t source)
+{
+ return audio_open_input_stream_real(dev, handle, devices, config,
+ stream_in, flags, address,
+ source);
+}
+#else
+static int audio_open_input_stream(struct audio_hw_device *dev,
+ audio_io_handle_t handle,
+ audio_devices_t devices,
+ struct audio_config *config,
+ struct audio_stream_in **stream_in)
+{
+ return audio_open_input_stream_real(dev, handle, devices, config,
+ stream_in, 0, NULL, 0);
+}
+#endif
+
static void audio_close_input_stream(struct audio_hw_device *dev,
struct audio_stream_in *stream_in)
{
return -ENOSYS;
}
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+static int set_master_mute(struct audio_hw_device *dev, bool mute)
+{
+ DBG("");
+ return -ENOSYS;
+}
+
+static int get_master_mute(struct audio_hw_device *dev, bool *mute)
+{
+ DBG("");
+ return -ENOSYS;
+}
+
+static int create_audio_patch(struct audio_hw_device *dev,
+ unsigned int num_sources,
+ const struct audio_port_config *sources,
+ unsigned int num_sinks,
+ const struct audio_port_config *sinks,
+ audio_patch_handle_t *handle)
+{
+ DBG("");
+ return -ENOSYS;
+}
+
+static int release_audio_patch(struct audio_hw_device *dev,
+ audio_patch_handle_t handle)
+{
+ DBG("");
+ return -ENOSYS;
+}
+
+static int get_audio_port(struct audio_hw_device *dev, struct audio_port *port)
+{
+ DBG("");
+ return -ENOSYS;
+}
+
+static int set_audio_port_config(struct audio_hw_device *dev,
+ const struct audio_port_config *config)
+{
+ DBG("");
+ return -ENOSYS;
+}
+#endif
+
static int audio_close(hw_device_t *device)
{
struct a2dp_audio_dev *a2dp_dev = (struct a2dp_audio_dev *)device;
+ unsigned int i;
DBG("");
unregister_endpoints();
+ for (i = 0; i < NUM_CODECS; i++) {
+ const struct audio_codec *codec = audio_codecs[i].get_codec();
+
+ if (!audio_codecs[i].loaded)
+ continue;
+
+ if (codec->unload)
+ codec->unload();
+
+ audio_codecs[i].loaded = false;
+ }
+
shutdown(listen_sk, SHUT_RDWR);
shutdown(audio_sk, SHUT_RDWR);
/* Check if socket is still alive. Empty while loop.*/
while (poll(&pfd, 1, -1) < 0 && errno == EINTR);
- if (pfd.revents & (POLLHUP | POLLERR | POLLNVAL)) {
- info("Audio HAL: Socket closed");
+ info("Audio HAL: Socket closed");
- pthread_mutex_lock(&sk_mutex);
- close(audio_sk);
- audio_sk = -1;
- pthread_mutex_unlock(&sk_mutex);
- }
+ pthread_mutex_lock(&sk_mutex);
+ close(audio_sk);
+ audio_sk = -1;
+ pthread_mutex_unlock(&sk_mutex);
}
/* audio_sk is closed at this point, just cleanup endpoints states */
hw_device_t **device)
{
struct a2dp_audio_dev *a2dp_dev;
+ size_t i;
int err;
DBG("");
a2dp_dev->dev.open_input_stream = audio_open_input_stream;
a2dp_dev->dev.close_input_stream = audio_close_input_stream;
a2dp_dev->dev.dump = audio_dump;
+#if ANDROID_VERSION >= PLATFORM_VER(5, 0, 0)
+ a2dp_dev->dev.set_master_mute = set_master_mute;
+ a2dp_dev->dev.get_master_mute = get_master_mute;
+ a2dp_dev->dev.create_audio_patch = create_audio_patch;
+ a2dp_dev->dev.release_audio_patch = release_audio_patch;
+ a2dp_dev->dev.get_audio_port = get_audio_port;
+ a2dp_dev->dev.set_audio_port_config = set_audio_port_config;
+#endif
+
+ for (i = 0; i < NUM_CODECS; i++) {
+ const struct audio_codec *codec = audio_codecs[i].get_codec();
+
+ if (codec->load && !codec->load())
+ continue;
+
+ audio_codecs[i].loaded = true;
+ }
/*
* Note that &a2dp_dev->dev.common is the same pointer as a2dp_dev.