1 /******************************************************************************
3 * Copyright (C) 2014 Google, Inc.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
17 ******************************************************************************/
19 #define LOG_TAG "bt_hci"
22 #include <cutils/properties.h>
24 #include "buffer_allocator.h"
26 #include "osi/include/fixed_queue.h"
27 #include "osi/include/future.h"
31 #include "hci_internals.h"
32 #include "hci_inject.h"
33 #include "hci_layer.h"
34 #include "osi/include/list.h"
35 #include "low_power_manager.h"
36 #include "btcore/include/module.h"
37 #include "osi/include/non_repeating_timer.h"
38 #include "osi/include/osi.h"
39 #include "osi/include/log.h"
40 #include "packet_fragmenter.h"
41 #include "osi/include/reactor.h"
44 // TODO(zachoverflow): remove this hack extern
45 #include <hardware/bluetooth.h>
46 bt_bdaddr_t btif_local_bd_addr;
// Inbound serial packet types are ACL(2), SCO(3), EVENT(4); outbound adds
// COMMAND(1). These macros map the 1-based type codes onto 0-based table
// indices for incoming_packets / preamble_sizes / outbound_event_types.
48 #define INBOUND_PACKET_TYPE_COUNT 3
49 #define PACKET_TYPE_TO_INBOUND_INDEX(type) ((type) - 2)
50 #define PACKET_TYPE_TO_INDEX(type) ((type) - 1)
// ACL has the largest preamble (4 bytes); its payload length is a
// little-endian uint16 at preamble bytes [2] (low) and [3] (high).
52 #define PREAMBLE_BUFFER_SIZE 4 // max preamble size, ACL
53 #define RETRIEVE_ACL_LENGTH(preamble) ((((preamble)[3]) << 8) | (preamble)[2])
// Fixed header (preamble) size for each packet type, indexed by
// PACKET_TYPE_TO_INDEX. NOTE(review): the closing "};" of this initializer
// appears to have been dropped from this extract.
55 static const uint8_t preamble_sizes[] = {
56 HCI_COMMAND_PREAMBLE_SIZE,
57 HCI_ACL_PREAMBLE_SIZE,
58 HCI_SCO_PREAMBLE_SIZE,
59 HCI_EVENT_PREAMBLE_SIZE
// BT_HDR event code assigned to each reassembled inbound packet before it is
// handed up the stack, same indexing as above. NOTE(review): the opening "{"
// and closing "};" appear to have been dropped from this extract.
62 static const uint16_t outbound_event_types[] =
64 MSG_HC_TO_STACK_HCI_ERR,
65 MSG_HC_TO_STACK_HCI_ACL,
66 MSG_HC_TO_STACK_HCI_SCO,
67 MSG_HC_TO_STACK_HCI_EVT
// Per-packet-type receive bookkeeping used by hal_says_data_ready's state
// machine. NOTE(review): the "typedef struct {" opener and some fields appear
// to have been dropped from this extract.
79 receive_state_t state;
80 uint16_t bytes_remaining;
81 uint8_t preamble[PREAMBLE_BUFFER_SIZE];
84 } packet_receive_data_t;
// Fields of the per-command wait entry (waiting_command_t): exactly one of
// complete_future or the callbacks is used per command, depending on whether
// the command was submitted via transmit_command_futured or transmit_command.
// NOTE(review): the struct opener and closing "} waiting_command_t;" appear
// to have been dropped from this extract.
88 future_t *complete_future;
89 command_complete_cb complete_callback;
90 command_status_cb status_callback;
95 // Using a define here, because it can be stringified for the property lookup
96 #define DEFAULT_STARTUP_TIMEOUT_MS 8000
97 #define STRING_VALUE_OF(x) #x
// How long shut_down waits for the vendor epilog to finish, and how long a
// sent HCI command may go unanswered before command_timed_out fires.
99 static const uint32_t EPILOG_TIMEOUT_MS = 3000;
100 static const uint32_t COMMAND_PENDING_TIMEOUT = 8000;
// Lazily-built singleton interface returned by hci_layer_get_interface.
103 static bool interface_created;
104 static hci_t interface;
106 // Modules we import and callbacks we export
107 static const allocator_t *buffer_allocator;
108 static const btsnoop_t *btsnoop;
109 static const hci_hal_t *hal;
110 static const hci_hal_callbacks_t hal_callbacks;
111 static const hci_inject_t *hci_inject;
112 static const low_power_manager_t *low_power_manager;
113 static const packet_fragmenter_t *packet_fragmenter;
114 static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks;
115 static const vendor_t *vendor;
// Completed by firmware_config_callback (or failed by startup_timer_expired).
117 static future_t *startup_future;
118 static thread_t *thread; // We own this
// volatile: written from vendor callback context, read at shut_down.
// NOTE(review): volatile is not a cross-thread synchronization primitive;
// presumably ordering is guaranteed elsewhere — confirm against callers.
120 static volatile bool firmware_is_configured = false;
121 static non_repeating_timer_t *epilog_timer;
122 static non_repeating_timer_t *startup_timer;
// Starts at 1 per the spec's initial command flow-control allowance; updated
// from command complete/status events in filter_incoming_event.
125 static int command_credits = 1;
126 static fixed_queue_t *command_queue;
127 static fixed_queue_t *packet_queue;
// Commands sent to the controller but not yet answered; guarded by
// commands_pending_response_lock.
130 static non_repeating_timer_t *command_response_timer;
131 static list_t *commands_pending_response;
132 static pthread_mutex_t commands_pending_response_lock;
133 static packet_receive_data_t incoming_packets[INBOUND_PACKET_TYPE_COUNT];
135 // The hand-off point for data going to a higher layer, set by the higher layer
136 static fixed_queue_t *upwards_data_queue;
// Forward declarations for the module lifecycle, the async startup/postload/
// epilog work items, queue-dequeue handlers, and HAL-inbound processing below.
138 static future_t *shut_down();
140 static void event_finish_startup(void *context);
141 static void firmware_config_callback(bool success);
142 static void startup_timer_expired(void *context);
144 static void event_postload(void *context);
145 static void sco_config_callback(bool success);
147 static void event_epilog(void *context);
148 static void epilog_finished_callback(bool success);
149 static void epilog_timer_expired(void *context);
151 static void event_command_ready(fixed_queue_t *queue, void *context);
152 static void event_packet_ready(fixed_queue_t *queue, void *context);
153 static void command_timed_out(void *context);
155 static void hal_says_data_ready(serial_data_type_t type);
156 static bool filter_incoming_event(BT_HDR *packet);
158 static serial_data_type_t event_to_data_type(uint16_t event);
159 static waiting_command_t *get_waiting_command(command_opcode_t opcode);
161 // Module lifecycle functions
// Module start-up: allocates timers/queues/thread/list state, initializes the
// imported modules (fragmenter, HAL, low-power manager, vendor), power-cycles
// the chip, then posts event_finish_startup and returns startup_future for
// the caller to wait on. On any allocation failure it falls through to
// shut_down() and returns an immediately-failed future.
// NOTE(review): this extract is missing lines inside most failure branches
// (presumably "goto error;" jumps) and several "if (!...)" conditions — the
// original control flow cannot be fully confirmed from what is visible here.
163 static future_t *start_up(void) {
164 LOG_INFO("%s", __func__);
166 // The host is only allowed to send at most one command initially,
167 // as per the Bluetooth spec, Volume 2, Part E, 4.4 (Command Flow Control)
168 // This value can change when you get a command complete or command status event.
170 firmware_is_configured = false;
172 pthread_mutex_init(&commands_pending_response_lock, NULL);
174 // Grab the override startup timeout ms, if present.
175 period_ms_t startup_timeout_ms;
176 char timeout_prop[PROPERTY_VALUE_MAX];
// Values under 100ms (or unparsable ones — atoi yields 0) are rejected and
// replaced with the stringified default.
177 if (!property_get("bluetooth.enable_timeout_ms", timeout_prop, STRING_VALUE_OF(DEFAULT_STARTUP_TIMEOUT_MS))
178 || (startup_timeout_ms = atoi(timeout_prop)) < 100)
179 startup_timeout_ms = DEFAULT_STARTUP_TIMEOUT_MS;
181 startup_timer = non_repeating_timer_new(startup_timeout_ms, startup_timer_expired, NULL);
182 if (!startup_timer) {
183 LOG_ERROR("%s unable to create startup timer.", __func__);
187 // Make sure we run in a bounded amount of time
188 non_repeating_timer_restart(startup_timer);
190 epilog_timer = non_repeating_timer_new(EPILOG_TIMEOUT_MS, epilog_timer_expired, NULL);
192 LOG_ERROR("%s unable to create epilog timer.", __func__);
196 command_response_timer = non_repeating_timer_new(COMMAND_PENDING_TIMEOUT, command_timed_out, NULL);
197 if (!command_response_timer) {
198 LOG_ERROR("%s unable to create command response timer.", __func__);
// Unbounded queues: flow control is done via command_credits, not queue size.
202 command_queue = fixed_queue_new(SIZE_MAX);
203 if (!command_queue) {
204 LOG_ERROR("%s unable to create pending command queue.", __func__);
208 packet_queue = fixed_queue_new(SIZE_MAX);
210 LOG_ERROR("%s unable to create pending packet queue.", __func__);
214 thread = thread_new("hci_thread");
216 LOG_ERROR("%s unable to create thread.", __func__);
220 commands_pending_response = list_new(NULL);
221 if (!commands_pending_response) {
222 LOG_ERROR("%s unable to create list for commands pending response.", __func__);
226 memset(incoming_packets, 0, sizeof(incoming_packets));
228 packet_fragmenter->init(&packet_fragmenter_callbacks);
// Dequeue handlers run on the hci thread's reactor.
230 fixed_queue_register_dequeue(command_queue, thread_get_reactor(thread), event_command_ready, NULL);
231 fixed_queue_register_dequeue(packet_queue, thread_get_reactor(thread), event_packet_ready, NULL);
233 vendor->open(btif_local_bd_addr.address, &interface);
234 hal->init(&hal_callbacks, thread);
235 low_power_manager->init(thread);
237 vendor->set_callback(VENDOR_CONFIGURE_FIRMWARE, firmware_config_callback);
238 vendor->set_callback(VENDOR_CONFIGURE_SCO, sco_config_callback);
239 vendor->set_callback(VENDOR_DO_EPILOG, epilog_finished_callback);
241 if (!hci_inject->open(&interface)) {
242 // TODO(sharvil): gracefully propagate failures from this layer.
245 int power_state = BT_VND_PWR_OFF;
246 #if (defined (BT_CLEAN_TURN_ON_DISABLED) && BT_CLEAN_TURN_ON_DISABLED == TRUE)
247 LOG_WARN("%s not turning off the chip before turning on.", __func__);
248 // So apparently this hack was needed in the past because a Wingray kernel driver
249 // didn't handle power off commands in a powered off state correctly.
251 // The comment in the old code said the workaround should be removed when the
252 // problem was fixed. Sadly, I have no idea if said bug was fixed or if said
253 // kernel is still in use, so we must leave this here for posterity. #sadpanda
255 // cycle power on the chip to ensure it has been reset
256 vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);
258 power_state = BT_VND_PWR_ON;
259 vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);
// Async remainder (firmware config) continues on the hci thread; the caller
// waits on the returned future, completed in firmware_config_callback.
261 startup_future = future_new();
262 LOG_DEBUG("%s starting async portion", __func__);
263 thread_post(thread, event_finish_startup, NULL);
264 return startup_future;
// Error path: tear down whatever was created and report failure immediately.
266 shut_down(); // returns NULL so no need to wait for it
267 return future_new_immediate(FUTURE_FAIL);
// Module shut-down: if firmware was configured, runs the vendor epilog
// (bounded by epilog_timer) before freeing queues, the pending-command list,
// the mutex, timers, and sub-modules, then powers the chip off.
// NOTE(review): several lines are missing from this extract (e.g. the waits/
// joins between the epilog post and the frees, and the trailing return) —
// teardown ordering cannot be fully confirmed from what is visible here.
270 static future_t *shut_down() {
271 LOG_INFO("%s", __func__);
276 if (firmware_is_configured) {
277 non_repeating_timer_restart(epilog_timer);
278 thread_post(thread, event_epilog, NULL);
// Queued-but-unsent buffers are released with the buffer allocator's free.
286 fixed_queue_free(command_queue, buffer_allocator->free);
287 fixed_queue_free(packet_queue, buffer_allocator->free);
288 list_free(commands_pending_response);
290 pthread_mutex_destroy(&commands_pending_response_lock);
292 packet_fragmenter->cleanup();
294 non_repeating_timer_free(epilog_timer);
295 non_repeating_timer_free(command_response_timer);
296 non_repeating_timer_free(startup_timer);
299 command_response_timer = NULL;
301 low_power_manager->cleanup();
305 int power_state = BT_VND_PWR_OFF;
306 vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);
311 firmware_is_configured = false;
// Module descriptor registering this layer's lifecycle with the module
// framework. NOTE(review): other designated initializers (.name,
// .dependencies, etc.) appear to have been dropped from this extract.
316 const module_t hci_module = {
319 .start_up = start_up,
320 .shut_down = shut_down,
328 // Interface functions
// Queues the SCO/postload configuration work item onto the hci thread.
330 static void do_postload() {
331 LOG_DEBUG("%s posting postload work item", __func__);
332 thread_post(thread, event_postload, NULL);
// Records where dispatch_reassembled should enqueue upward-bound data; set by
// the higher layer before traffic flows.
335 static void set_data_queue(fixed_queue_t *queue) {
336 upwards_data_queue = queue;
// Queues an HCI command for transmission. A wait entry recording the opcode,
// callbacks, and context is enqueued; event_command_ready later sends it and
// moves the entry to commands_pending_response until the matching command
// complete/status event arrives (see filter_incoming_event).
// NOTE(review): the parameter list's first line (the BT_HDR *command
// parameter) and the allocation-failure branch appear truncated in this
// extract.
339 static void transmit_command(
341 command_complete_cb complete_callback,
342 command_status_cb status_callback,
344 waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
346 LOG_ERROR("%s couldn't allocate space for wait entry.", __func__);
// The opcode is the first uint16 of the command payload.
350 uint8_t *stream = command->data + command->offset;
351 STREAM_TO_UINT16(wait_entry->opcode, stream);
352 wait_entry->complete_callback = complete_callback;
353 wait_entry->status_callback = status_callback;
354 wait_entry->command = command;
355 wait_entry->context = context;
357 // Store the command message type in the event field
358 // in case the upper layer didn't already
359 command->event = MSG_STACK_TO_HC_HCI_CMD;
361 fixed_queue_enqueue(command_queue, wait_entry);
// Future-based variant of transmit_command: instead of callbacks, the caller
// receives a future that filter_incoming_event completes with the command
// complete packet. NOTE(review): the "return future;" and closing brace
// appear to have been dropped from this extract.
364 static future_t *transmit_command_futured(BT_HDR *command) {
365 waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
366 assert(wait_entry != NULL);
368 future_t *future = future_new();
370 uint8_t *stream = command->data + command->offset;
371 STREAM_TO_UINT16(wait_entry->opcode, stream);
372 wait_entry->complete_future = future;
373 wait_entry->command = command;
375 // Store the command message type in the event field
376 // in case the upper layer didn't already
377 command->event = MSG_STACK_TO_HC_HCI_CMD;
379 fixed_queue_enqueue(command_queue, wait_entry);
// Data-dispatcher entry point for outbound traffic: commands are rerouted
// through transmit_command (legacy path, no callbacks); everything else goes
// straight onto the packet queue.
383 static void transmit_downward(data_dispatcher_type_t type, void *data) {
384 if (type == MSG_STACK_TO_HC_HCI_CMD) {
385 // TODO(zachoverflow): eliminate this call
386 transmit_command((BT_HDR *)data, NULL, NULL, NULL);
387 LOG_WARN("%s legacy transmit of command. Use transmit_command instead.", __func__);
389 fixed_queue_enqueue(packet_queue, data);
393 // Start up functions
// Async tail of start_up, run on the hci thread: kicks off vendor firmware
// configuration; completion arrives via firmware_config_callback.
395 static void event_finish_startup(UNUSED_ATTR void *context) {
396 LOG_INFO("%s", __func__);
398 vendor->send_async_command(VENDOR_CONFIGURE_FIRMWARE, NULL);
// Vendor firmware-config done: mark configured, cancel the watchdog timer,
// and complete the startup future with success.
401 static void firmware_config_callback(UNUSED_ATTR bool success) {
402 LOG_INFO("%s", __func__);
403 firmware_is_configured = true;
404 non_repeating_timer_cancel(startup_timer);
406 future_ready(startup_future, FUTURE_SUCCESS);
407 startup_future = NULL;
// Startup watchdog fired before firmware config finished: fail the startup
// future so the caller doesn't block forever.
410 static void startup_timer_expired(UNUSED_ATTR void *context) {
411 LOG_ERROR("%s", __func__);
412 future_ready(startup_future, FUTURE_FAIL);
413 startup_future = NULL;
416 // Postload functions
// Postload work item: asks the vendor to configure SCO. If the async request
// can't even be issued, the callback is invoked synchronously with failure so
// the postload sequence still completes.
418 static void event_postload(UNUSED_ATTR void *context) {
419 LOG_INFO("%s", __func__);
420 if(vendor->send_async_command(VENDOR_CONFIGURE_SCO, NULL) == -1) {
421 // If couldn't configure sco, we won't get the sco configuration callback
422 // so go pretend to do it now
423 sco_config_callback(false);
428 static void sco_config_callback(UNUSED_ATTR bool success) {
429 LOG_INFO("%s postload finished.", __func__);
// Shutdown epilog work item posted by shut_down; the vendor signals
// completion via epilog_finished_callback (or epilog_timer_expired fires).
434 static void event_epilog(UNUSED_ATTR void *context) {
435 vendor->send_async_command(VENDOR_DO_EPILOG, NULL);
438 static void epilog_finished_callback(UNUSED_ATTR bool success) {
439 LOG_INFO("%s", __func__);
443 static void epilog_timer_expired(UNUSED_ATTR void *context) {
444 LOG_INFO("%s", __func__);
448 // Command/packet transmitting functions
// Dequeue handler for the command queue: only sends when the controller has
// granted at least one command credit. The wait entry moves to
// commands_pending_response (under the lock) before transmission, and the
// response timer runs whenever responses are outstanding.
// NOTE(review): the credit decrement and closing braces appear to have been
// dropped from this extract.
450 static void event_command_ready(fixed_queue_t *queue, UNUSED_ATTR void *context) {
451 if (command_credits > 0) {
452 waiting_command_t *wait_entry = fixed_queue_dequeue(queue);
455 // Move it to the list of commands awaiting response
456 pthread_mutex_lock(&commands_pending_response_lock);
457 list_append(commands_pending_response, wait_entry);
458 pthread_mutex_unlock(&commands_pending_response_lock);
// Keep the transport awake only for the duration of the transmit.
461 low_power_manager->wake_assert();
462 packet_fragmenter->fragment_and_dispatch(wait_entry->command);
463 low_power_manager->transmit_done();
465 non_repeating_timer_restart_if(command_response_timer, !list_is_empty(commands_pending_response));
// Dequeue handler for outbound data packets: no flow control here (unlike
// commands) — wake the transport, fragment/send, and release the wake.
469 static void event_packet_ready(fixed_queue_t *queue, UNUSED_ATTR void *context) {
470 // The queue may be the command queue or the packet queue, we don't care
471 BT_HDR *packet = (BT_HDR *)fixed_queue_dequeue(queue);
473 low_power_manager->wake_assert();
474 packet_fragmenter->fragment_and_dispatch(packet);
475 low_power_manager->transmit_done();
478 // Callback for the fragmenter to send a fragment
479 static void transmit_fragment(BT_HDR *packet, bool send_transmit_finished) {
480 uint16_t event = packet->event & MSG_EVT_MASK;
481 serial_data_type_t type = event_to_data_type(event);
483 btsnoop->capture(packet, false);
484 hal->transmit_data(type, packet->data + packet->offset, packet->len);
486 if (event != MSG_STACK_TO_HC_HCI_CMD && send_transmit_finished)
487 buffer_allocator->free(packet);
490 static void fragmenter_transmit_finished(BT_HDR *packet, bool all_fragments_sent) {
491 if (all_fragments_sent) {
492 buffer_allocator->free(packet);
494 // This is kind of a weird case, since we're dispatching a partially sent packet
495 // up to a higher layer.
496 // TODO(zachoverflow): rework upper layer so this isn't necessary.
497 data_dispatcher_dispatch(interface.event_dispatcher, packet->event & MSG_EVT_MASK, packet);
// Command-response watchdog: a command went unanswered for
// COMMAND_PENDING_TIMEOUT ms. This is treated as unrecoverable — the oldest
// pending opcode is logged and the whole bluetooth process is killed so it
// restarts from a clean state.
501 static void command_timed_out(UNUSED_ATTR void *context) {
502 pthread_mutex_lock(&commands_pending_response_lock);
504 if (list_is_empty(commands_pending_response)) {
505 LOG_ERROR("%s with no commands pending response", __func__);
507 waiting_command_t *wait_entry = list_front(commands_pending_response);
508 pthread_mutex_unlock(&commands_pending_response_lock);
510 // We shouldn't try to recover the stack from this command timeout.
511 // If it's caused by a software bug, fix it. If it's a hardware bug, fix it.
512 LOG_ERROR("%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, wait_entry->opcode);
515 LOG_ERROR("%s restarting the bluetooth process.", __func__);
517 kill(getpid(), SIGKILL);
520 // Event/packet receiving functions
522 // This function is not required to read all of a packet in one go, so
523 // be wary of reentry. But this function must return after finishing a packet.
// HAL-inbound state machine. Reads bytes for one packet type and assembles
// them into a BT_HDR: BRAND_NEW -> PREAMBLE (fixed-size header) -> BODY
// (payload, length taken from the preamble) -> FINISHED, with IGNORE used to
// drain a packet whose buffer could not be allocated. Returns after at most
// one complete packet so the reactor can interleave other work.
// NOTE(review): the switch's case labels, index increments, and several
// break/closing-brace lines appear to have been dropped from this extract;
// the transitions described above are inferred from the visible assignments.
524 static void hal_says_data_ready(serial_data_type_t type) {
525 packet_receive_data_t *incoming = &incoming_packets[PACKET_TYPE_TO_INBOUND_INDEX(type)];
// Pull one byte at a time until the HAL has nothing more for us.
528 while (hal->read_data(type, &byte, 1, false) != 0) {
529 switch (incoming->state) {
531 // Initialize and prepare to jump to the preamble reading state
532 incoming->bytes_remaining = preamble_sizes[PACKET_TYPE_TO_INDEX(type)];
533 memset(incoming->preamble, 0, PREAMBLE_BUFFER_SIZE);
535 incoming->state = PREAMBLE;
536 // INTENTIONAL FALLTHROUGH
538 incoming->preamble[incoming->index] = byte;
540 incoming->bytes_remaining--;
542 if (incoming->bytes_remaining == 0) {
543 // For event and sco preambles, the last byte we read is the length
544 incoming->bytes_remaining = (type == DATA_TYPE_ACL) ? RETRIEVE_ACL_LENGTH(incoming->preamble) : byte;
546 size_t buffer_size = BT_HDR_SIZE + incoming->index + incoming->bytes_remaining;
547 incoming->buffer = (BT_HDR *)buffer_allocator->alloc(buffer_size);
549 if (!incoming->buffer) {
550 LOG_ERROR("%s error getting buffer for incoming packet of type %d and size %d", __func__, type, buffer_size);
551 // Can't read any more of this current packet, so jump out
552 incoming->state = incoming->bytes_remaining == 0 ? BRAND_NEW : IGNORE;
556 // Initialize the buffer
557 incoming->buffer->offset = 0;
558 incoming->buffer->layer_specific = 0;
559 incoming->buffer->event = outbound_event_types[PACKET_TYPE_TO_INDEX(type)];
560 memcpy(incoming->buffer->data, incoming->preamble, incoming->index);
562 incoming->state = incoming->bytes_remaining > 0 ? BODY : FINISHED;
// BODY: first byte was read by the while condition; the rest of the payload
// is bulk-read in one call.
567 incoming->buffer->data[incoming->index] = byte;
569 incoming->bytes_remaining--;
571 size_t bytes_read = hal->read_data(type, (incoming->buffer->data + incoming->index), incoming->bytes_remaining, false);
572 incoming->index += bytes_read;
573 incoming->bytes_remaining -= bytes_read;
575 incoming->state = incoming->bytes_remaining == 0 ? FINISHED : incoming->state;
// IGNORE: consume and discard bytes of a packet we couldn't buffer.
578 incoming->bytes_remaining--;
579 if (incoming->bytes_remaining == 0) {
580 incoming->state = BRAND_NEW;
581 // Don't forget to let the hal know we finished the packet we were ignoring.
582 // Otherwise we'll get out of sync with hals that embed extra information
583 // in the uart stream (like H4). #badnewsbears
584 hal->packet_finished(type);
590 LOG_ERROR("%s the state machine should not have been left in the finished state.", __func__);
// A full packet has been assembled: snoop it, then either hand it to the
// fragmenter (ACL/SCO) or dispatch the event (unless filtered).
594 if (incoming->state == FINISHED) {
595 incoming->buffer->len = incoming->index;
596 btsnoop->capture(incoming->buffer, true);
598 if (type != DATA_TYPE_EVENT) {
599 packet_fragmenter->reassemble_and_dispatch(incoming->buffer);
600 } else if (!filter_incoming_event(incoming->buffer)) {
601 // Dispatch the event by event code
602 uint8_t *stream = incoming->buffer->data;
604 STREAM_TO_UINT8(event_code, stream);
606 data_dispatcher_dispatch(
607 interface.event_dispatcher,
613 // We don't control the buffer anymore
614 incoming->buffer = NULL;
615 incoming->state = BRAND_NEW;
616 hal->packet_finished(type);
618 // We return after a packet is finished for two reasons:
619 // 1. The type of the next packet could be different.
620 // 2. We don't want to hog cpu time.
626 // Returns true if the event was intercepted and should not proceed to
627 // higher layers. Also inspects an incoming event for interesting
628 // information, like how many commands are now able to be sent.
// Inspects an incoming HCI event. Command complete/status events are consumed
// here: they refresh command_credits (flow control), are matched to their
// pending wait entry by opcode, and trigger the entry's callback or future.
// Returns true when the event was intercepted (caller must not dispatch it).
// NOTE(review): several lines are missing from this extract (local variable
// declarations for event_code/status, the "if (!wait_entry)" guards before
// the LOG_WARNs, the post-handling credit/queue-kick lines, and the final
// returns) — the full flow is not visible here.
629 static bool filter_incoming_event(BT_HDR *packet) {
630 waiting_command_t *wait_entry = NULL;
631 uint8_t *stream = packet->data;
633 command_opcode_t opcode;
635 STREAM_TO_UINT8(event_code, stream);
636 STREAM_SKIP_UINT8(stream); // Skip the parameter total length field
638 if (event_code == HCI_COMMAND_COMPLETE_EVT) {
// The event carries the controller's new credit count and the opcode of the
// command it answers.
639 STREAM_TO_UINT8(command_credits, stream);
640 STREAM_TO_UINT16(opcode, stream);
642 wait_entry = get_waiting_command(opcode);
644 LOG_WARN("%s command complete event with no matching command. opcode: 0x%x.", __func__, opcode);
645 else if (wait_entry->complete_callback)
646 wait_entry->complete_callback(packet, wait_entry->context);
647 else if (wait_entry->complete_future)
648 future_ready(wait_entry->complete_future, packet);
651 } else if (event_code == HCI_COMMAND_STATUS_EVT) {
653 STREAM_TO_UINT8(status, stream);
654 STREAM_TO_UINT8(command_credits, stream);
655 STREAM_TO_UINT16(opcode, stream);
657 // If a command generates a command status event, it won't be getting a command complete event
659 wait_entry = get_waiting_command(opcode);
661 LOG_WARN("%s command status event with no matching command. opcode: 0x%x", __func__, opcode);
662 else if (wait_entry->status_callback)
663 wait_entry->status_callback(status, wait_entry->command, wait_entry->context);
// Re-arm (or stop) the response watchdog based on what's still outstanding.
670 non_repeating_timer_restart_if(command_response_timer, !list_is_empty(commands_pending_response));
673 // If it has a callback, it's responsible for freeing the packet
674 if (event_code == HCI_COMMAND_STATUS_EVT || (!wait_entry->complete_callback && !wait_entry->complete_future))
675 buffer_allocator->free(packet);
677 // If it has a callback, it's responsible for freeing the command
678 if (event_code == HCI_COMMAND_COMPLETE_EVT || !wait_entry->status_callback)
679 buffer_allocator->free(wait_entry->command);
681 osi_free(wait_entry);
683 buffer_allocator->free(packet);
689 // Callback for the fragmenter to dispatch up a completely reassembled packet
690 static void dispatch_reassembled(BT_HDR *packet) {
691 // Events should already have been dispatched before this point
692 assert((packet->event & MSG_EVT_MASK) != MSG_HC_TO_STACK_HCI_EVT);
693 assert(upwards_data_queue != NULL);
695 if (upwards_data_queue) {
696 fixed_queue_enqueue(upwards_data_queue, packet);
698 LOG_ERROR("%s had no queue to place upwards data packet in. Dropping it on the floor.", __func__);
699 buffer_allocator->free(packet);
703 // Misc internal functions
705 // TODO(zachoverflow): we seem to do this a couple places, like the HCI inject module. #centralize
// Maps an outbound BT_HDR event code to the serial packet type used on the
// wire; logs an error for anything unrecognized. NOTE(review): the fallback
// return and closing brace appear to have been dropped from this extract.
706 static serial_data_type_t event_to_data_type(uint16_t event) {
707 if (event == MSG_STACK_TO_HC_HCI_ACL)
708 return DATA_TYPE_ACL;
709 else if (event == MSG_STACK_TO_HC_HCI_SCO)
710 return DATA_TYPE_SCO;
711 else if (event == MSG_STACK_TO_HC_HCI_CMD)
712 return DATA_TYPE_COMMAND;
714 LOG_ERROR("%s invalid event type, could not translate 0x%x", __func__, event);
// Finds, removes, and returns the pending wait entry matching |opcode|, under
// the pending-response lock; ownership transfers to the caller. NOTE(review):
// the "continue;" inside the loop, the returns, and the closing brace appear
// to have been dropped from this extract (the trailing unlock suggests a
// NULL return when no entry matches).
719 static waiting_command_t *get_waiting_command(command_opcode_t opcode) {
720 pthread_mutex_lock(&commands_pending_response_lock);
722 for (const list_node_t *node = list_begin(commands_pending_response);
723 node != list_end(commands_pending_response);
724 node = list_next(node)) {
725 waiting_command_t *wait_entry = list_node(node);
727 if (!wait_entry || wait_entry->opcode != opcode)
730 list_remove(commands_pending_response, wait_entry);
732 pthread_mutex_unlock(&commands_pending_response_lock);
736 pthread_mutex_unlock(&commands_pending_response_lock);
// One-time population of the exported hci_t vtable, including creation of the
// upward event dispatcher. Idempotent via interface_created.
740 static void init_layer_interface() {
741 if (!interface_created) {
742 interface.send_low_power_command = low_power_manager->post_command;
743 interface.do_postload = do_postload;
745 // It's probably ok for this to live forever. It's small and
746 // there's only one instance of the hci interface.
747 interface.event_dispatcher = data_dispatcher_new("hci_layer");
748 if (!interface.event_dispatcher) {
749 LOG_ERROR("%s could not create upward dispatcher.", __func__);
753 interface.set_data_queue = set_data_queue;
754 interface.transmit_command = transmit_command;
755 interface.transmit_command_futured = transmit_command_futured;
756 interface.transmit_downward = transmit_downward;
757 interface_created = true;
// Callback tables handed to the HAL and the fragmenter. NOTE(review): the
// hal_callbacks initializer body (presumably hal_says_data_ready) and the
// first fragmenter entry appear to have been dropped from this extract.
761 static const hci_hal_callbacks_t hal_callbacks = {
765 static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks = {
767 dispatch_reassembled,
768 fragmenter_transmit_finished
// Public accessor: wires up all imported module interfaces, then builds (if
// needed) and returns the singleton hci_t. NOTE(review): the
// "return &interface;" and closing brace appear to have been dropped from
// this extract.
771 const hci_t *hci_layer_get_interface() {
772 buffer_allocator = buffer_allocator_get_interface();
773 hal = hci_hal_get_interface();
774 btsnoop = btsnoop_get_interface();
775 hci_inject = hci_inject_get_interface();
776 packet_fragmenter = packet_fragmenter_get_interface();
777 vendor = vendor_get_interface();
778 low_power_manager = low_power_manager_get_interface();
780 init_layer_interface();
// Test seam: same as hci_layer_get_interface but with every imported module
// injected by the caller, so unit tests can substitute mocks. NOTE(review):
// the hal assignment, the trailing return, and the closing brace appear to
// have been dropped from this extract.
784 const hci_t *hci_layer_get_test_interface(
785 const allocator_t *buffer_allocator_interface,
786 const hci_hal_t *hal_interface,
787 const btsnoop_t *btsnoop_interface,
788 const hci_inject_t *hci_inject_interface,
789 const packet_fragmenter_t *packet_fragmenter_interface,
790 const vendor_t *vendor_interface,
791 const low_power_manager_t *low_power_manager_interface) {
793 buffer_allocator = buffer_allocator_interface;
795 btsnoop = btsnoop_interface;
796 hci_inject = hci_inject_interface;
797 packet_fragmenter = packet_fragmenter_interface;
798 vendor = vendor_interface;
799 low_power_manager = low_power_manager_interface;
801 init_layer_interface();