android-x86/system-bt: hci/src/hci_layer.c
/******************************************************************************
 *
 *  Copyright (C) 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#define LOG_TAG "bt_hci"

#include "hci_layer.h"

#include <assert.h>
#include <pthread.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

// TODO(armansito): cutils/properties.h is only being used to pull-in runtime
// settings on Android. Remove this conditional include once we have a generic
// way to obtain system properties.
#if !defined(OS_GENERIC)
#include <cutils/properties.h>
#endif  // !defined(OS_GENERIC)

#include "btcore/include/module.h"
#include "btsnoop.h"
#include "buffer_allocator.h"
#include "hci_hal.h"
#include "hci_inject.h"
#include "hci_internals.h"
#include "hcidefs.h"
#include "hcimsgs.h"
#include "low_power_manager.h"
#include "osi/include/list.h"
#include "osi/include/log.h"
#include "osi/include/non_repeating_timer.h"
#include "osi/include/reactor.h"
#include "packet_fragmenter.h"
#include "vendor.h"

// TODO(zachoverflow): remove this hack extern
#include <hardware/bluetooth.h>
bt_bdaddr_t btif_local_bd_addr;

#define INBOUND_PACKET_TYPE_COUNT 3
#define PACKET_TYPE_TO_INBOUND_INDEX(type) ((type) - 2)
#define PACKET_TYPE_TO_INDEX(type) ((type) - 1)

#define PREAMBLE_BUFFER_SIZE 4 // max preamble size, ACL
#define RETRIEVE_ACL_LENGTH(preamble) ((((preamble)[3]) << 8) | (preamble)[2])
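// Serial (H4) packet type indicators are 1 = command, 2 = ACL, 3 = SCO,
// 4 = event (see serial_data_type_t in hci_hal.h), so PACKET_TYPE_TO_INDEX
// maps a type into preamble_sizes / outbound_event_types below, while
// PACKET_TYPE_TO_INBOUND_INDEX maps the three inbound types (ACL, SCO,
// event) into incoming_packets. The ACL preamble is a 2-byte handle followed
// by a 2-byte little-endian data length, which is why RETRIEVE_ACL_LENGTH
// combines preamble bytes 2 and 3.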

static const uint8_t preamble_sizes[] = {
  HCI_COMMAND_PREAMBLE_SIZE,
  HCI_ACL_PREAMBLE_SIZE,
  HCI_SCO_PREAMBLE_SIZE,
  HCI_EVENT_PREAMBLE_SIZE
};

static const uint16_t outbound_event_types[] = {
  MSG_HC_TO_STACK_HCI_ERR,
  MSG_HC_TO_STACK_HCI_ACL,
  MSG_HC_TO_STACK_HCI_SCO,
  MSG_HC_TO_STACK_HCI_EVT
};

typedef enum {
  BRAND_NEW,
  PREAMBLE,
  BODY,
  IGNORE,
  FINISHED
} receive_state_t;

typedef struct {
  receive_state_t state;
  uint16_t bytes_remaining;
  uint8_t preamble[PREAMBLE_BUFFER_SIZE];
  uint16_t index;
  BT_HDR *buffer;
} packet_receive_data_t;

typedef struct {
  uint16_t opcode;
  future_t *complete_future;
  command_complete_cb complete_callback;
  command_status_cb status_callback;
  void *context;
  BT_HDR *command;
} waiting_command_t;

// Using a define here, because it can be stringified for the property lookup.
// Stringification needs two levels of macros so that the macro's value
// ("8000"), not its name, ends up in the string.
#define DEFAULT_STARTUP_TIMEOUT_MS 8000
#define STRINGIFY_(x) #x
#define STRING_VALUE_OF(x) STRINGIFY_(x)

static const uint32_t EPILOG_TIMEOUT_MS = 3000;
static const uint32_t COMMAND_PENDING_TIMEOUT = 8000;

// Our interface
static bool interface_created;
static hci_t interface;

// Modules we import and callbacks we export
static const allocator_t *buffer_allocator;
static const btsnoop_t *btsnoop;
static const hci_hal_t *hal;
static const hci_hal_callbacks_t hal_callbacks;
static const hci_inject_t *hci_inject;
static const low_power_manager_t *low_power_manager;
static const packet_fragmenter_t *packet_fragmenter;
static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks;
static const vendor_t *vendor;

static future_t *startup_future;
static thread_t *thread; // We own this

static volatile bool firmware_is_configured = false;
static non_repeating_timer_t *epilog_timer;
static non_repeating_timer_t *startup_timer;

// Outbound-related
static int command_credits = 1;
static fixed_queue_t *command_queue;
static fixed_queue_t *packet_queue;

// Inbound-related
static non_repeating_timer_t *command_response_timer;
static list_t *commands_pending_response;
static pthread_mutex_t commands_pending_response_lock;
static packet_receive_data_t incoming_packets[INBOUND_PACKET_TYPE_COUNT];

// The hand-off point for data going to a higher layer, set by the higher layer
static fixed_queue_t *upwards_data_queue;

static future_t *shut_down();

static void event_finish_startup(void *context);
static void firmware_config_callback(bool success);
static void startup_timer_expired(void *context);

static void event_postload(void *context);
static void sco_config_callback(bool success);

static void event_epilog(void *context);
static void epilog_finished_callback(bool success);
static void epilog_timer_expired(void *context);

static void event_command_ready(fixed_queue_t *queue, void *context);
static void event_packet_ready(fixed_queue_t *queue, void *context);
static void command_timed_out(void *context);

static void hal_says_data_ready(serial_data_type_t type);
static bool filter_incoming_event(BT_HDR *packet);

static serial_data_type_t event_to_data_type(uint16_t event);
static waiting_command_t *get_waiting_command(command_opcode_t opcode);

// Module lifecycle functions

static future_t *start_up(void) {
  LOG_INFO(LOG_TAG, "%s", __func__);

  // The host is only allowed to send at most one command initially,
  // as per the Bluetooth spec, Volume 2, Part E, 4.4 (Command Flow Control)
  // This value can change when you get a command complete or command status event.
  command_credits = 1;
  firmware_is_configured = false;

  pthread_mutex_init(&commands_pending_response_lock, NULL);

  // TODO(armansito): cutils/properties.h is only being used to pull-in runtime
  // settings on Android. Remove this conditional include once we have a generic
  // way to obtain system properties. For now, always use the default timeout on
  // non-Android builds.
  period_ms_t startup_timeout_ms = DEFAULT_STARTUP_TIMEOUT_MS;

#if !defined(OS_GENERIC)
  // Grab the override startup timeout ms, if present.
  char timeout_prop[PROPERTY_VALUE_MAX];
  if (!property_get("bluetooth.enable_timeout_ms", timeout_prop, STRING_VALUE_OF(DEFAULT_STARTUP_TIMEOUT_MS))
      || (startup_timeout_ms = atoi(timeout_prop)) < 100)
    startup_timeout_ms = DEFAULT_STARTUP_TIMEOUT_MS;
#endif  // !defined(OS_GENERIC)
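  // Illustrative example: on an Android build the timeout can be overridden
  // at runtime through the property read above, e.g.
  //   adb shell setprop bluetooth.enable_timeout_ms 12000
  // Non-numeric or sub-100 ms values fall back to the default above.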

  startup_timer = non_repeating_timer_new(startup_timeout_ms, startup_timer_expired, NULL);
  if (!startup_timer) {
    LOG_ERROR(LOG_TAG, "%s unable to create startup timer.", __func__);
    goto error;
  }

  // Make sure we run in a bounded amount of time
  non_repeating_timer_restart(startup_timer);

  epilog_timer = non_repeating_timer_new(EPILOG_TIMEOUT_MS, epilog_timer_expired, NULL);
  if (!epilog_timer) {
    LOG_ERROR(LOG_TAG, "%s unable to create epilog timer.", __func__);
    goto error;
  }

  command_response_timer = non_repeating_timer_new(COMMAND_PENDING_TIMEOUT, command_timed_out, NULL);
  if (!command_response_timer) {
    LOG_ERROR(LOG_TAG, "%s unable to create command response timer.", __func__);
    goto error;
  }

  command_queue = fixed_queue_new(SIZE_MAX);
  if (!command_queue) {
    LOG_ERROR(LOG_TAG, "%s unable to create pending command queue.", __func__);
    goto error;
  }

  packet_queue = fixed_queue_new(SIZE_MAX);
  if (!packet_queue) {
    LOG_ERROR(LOG_TAG, "%s unable to create pending packet queue.", __func__);
    goto error;
  }

  thread = thread_new("hci_thread");
  if (!thread) {
    LOG_ERROR(LOG_TAG, "%s unable to create thread.", __func__);
    goto error;
  }

  commands_pending_response = list_new(NULL);
  if (!commands_pending_response) {
    LOG_ERROR(LOG_TAG, "%s unable to create list for commands pending response.", __func__);
    goto error;
  }

  memset(incoming_packets, 0, sizeof(incoming_packets));

  packet_fragmenter->init(&packet_fragmenter_callbacks);

  fixed_queue_register_dequeue(command_queue, thread_get_reactor(thread), event_command_ready, NULL);
  fixed_queue_register_dequeue(packet_queue, thread_get_reactor(thread), event_packet_ready, NULL);

  vendor->open(btif_local_bd_addr.address, &interface);
  hal->init(&hal_callbacks, thread);
  low_power_manager->init(thread);

  vendor->set_callback(VENDOR_CONFIGURE_FIRMWARE, firmware_config_callback);
  vendor->set_callback(VENDOR_CONFIGURE_SCO, sco_config_callback);
  vendor->set_callback(VENDOR_DO_EPILOG, epilog_finished_callback);

  if (!hci_inject->open(&interface)) {
    // TODO(sharvil): gracefully propagate failures from this layer.
  }

  int power_state = BT_VND_PWR_OFF;
#if (defined (BT_CLEAN_TURN_ON_DISABLED) && BT_CLEAN_TURN_ON_DISABLED == TRUE)
  LOG_WARN(LOG_TAG, "%s not turning off the chip before turning on.", __func__);
  // So apparently this hack was needed in the past because a Wingray kernel driver
  // didn't handle power off commands in a powered off state correctly.

  // The comment in the old code said the workaround should be removed when the
  // problem was fixed. Sadly, I have no idea if said bug was fixed or if said
  // kernel is still in use, so we must leave this here for posterity. #sadpanda
#else
  // cycle power on the chip to ensure it has been reset
  vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);
#endif
  power_state = BT_VND_PWR_ON;
  vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);

  startup_future = future_new();
  LOG_DEBUG(LOG_TAG, "%s starting async portion", __func__);
  thread_post(thread, event_finish_startup, NULL);
  return startup_future;
error:;
  shut_down(); // returns NULL so no need to wait for it
  return future_new_immediate(FUTURE_FAIL);
}

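// shut_down() also serves as the error path of start_up() (see the goto
// above). When the firmware has been configured, an asynchronous vendor
// epilog is posted to the hci thread first, bounded by epilog_timer, before
// the thread is stopped and joined.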
static future_t *shut_down() {
  LOG_INFO(LOG_TAG, "%s", __func__);

  hci_inject->close();

  if (thread) {
    if (firmware_is_configured) {
      non_repeating_timer_restart(epilog_timer);
      thread_post(thread, event_epilog, NULL);
    } else {
      thread_stop(thread);
    }

    thread_join(thread);
  }

  fixed_queue_free(command_queue, osi_free);
  fixed_queue_free(packet_queue, buffer_allocator->free);
  list_free(commands_pending_response);

  pthread_mutex_destroy(&commands_pending_response_lock);

  packet_fragmenter->cleanup();

  non_repeating_timer_free(epilog_timer);
  non_repeating_timer_free(command_response_timer);
  non_repeating_timer_free(startup_timer);

  epilog_timer = NULL;
  command_response_timer = NULL;
  startup_timer = NULL;

  low_power_manager->cleanup();
  hal->close();

  // Turn off the chip
  int power_state = BT_VND_PWR_OFF;
  vendor->send_command(VENDOR_CHIP_POWER_CONTROL, &power_state);
  vendor->close();

  thread_free(thread);
  thread = NULL;
  firmware_is_configured = false;

  return NULL;
}

EXPORT_SYMBOL const module_t hci_module = {
  .name = HCI_MODULE,
  .init = NULL,
  .start_up = start_up,
  .shut_down = shut_down,
  .clean_up = NULL,
  .dependencies = {
    BTSNOOP_MODULE,
    NULL
  }
};
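// Note: the module framework is expected to bring up the dependencies listed
// above (btsnoop) first and to wait on the future returned by start_up(),
// which completes only once firmware_config_callback (or the startup timer)
// fires.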

// Interface functions

static void do_postload() {
  LOG_DEBUG(LOG_TAG, "%s posting postload work item", __func__);
  thread_post(thread, event_postload, NULL);
}

static void set_data_queue(fixed_queue_t *queue) {
  upwards_data_queue = queue;
}

static void transmit_command(
    BT_HDR *command,
    command_complete_cb complete_callback,
    command_status_cb status_callback,
    void *context) {
  waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
  if (!wait_entry) {
    LOG_ERROR(LOG_TAG, "%s couldn't allocate space for wait entry.", __func__);
    return;
  }

  uint8_t *stream = command->data + command->offset;
  STREAM_TO_UINT16(wait_entry->opcode, stream);
  wait_entry->complete_callback = complete_callback;
  wait_entry->status_callback = status_callback;
  wait_entry->command = command;
  wait_entry->context = context;

  // Store the command message type in the event field
  // in case the upper layer didn't already
  command->event = MSG_STACK_TO_HC_HCI_CMD;

  fixed_queue_enqueue(command_queue, wait_entry);
}
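// Note on transmit_command(): callers typically hand in a BT_HDR built by the
// HCI command helpers declared in hcimsgs.h. complete_callback fires on the
// matching Command Complete event (and then owns that event packet), while
// status_callback fires on a Command Status event (and then owns the original
// command buffer); see filter_incoming_event below.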

static future_t *transmit_command_futured(BT_HDR *command) {
  waiting_command_t *wait_entry = osi_calloc(sizeof(waiting_command_t));
  assert(wait_entry != NULL);

  future_t *future = future_new();

  uint8_t *stream = command->data + command->offset;
  STREAM_TO_UINT16(wait_entry->opcode, stream);
  wait_entry->complete_future = future;
  wait_entry->command = command;

  // Store the command message type in the event field
  // in case the upper layer didn't already
  command->event = MSG_STACK_TO_HC_HCI_CMD;

  fixed_queue_enqueue(command_queue, wait_entry);
  return future;
}
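// Note on transmit_command_futured(): intended for callers that block on the
// result (e.g. with future_await). The future resolves to the raw Command
// Complete event packet, which the waiter is then responsible for freeing.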

static void transmit_downward(data_dispatcher_type_t type, void *data) {
  if (type == MSG_STACK_TO_HC_HCI_CMD) {
    // TODO(zachoverflow): eliminate this call
    transmit_command((BT_HDR *)data, NULL, NULL, NULL);
    LOG_WARN(LOG_TAG, "%s legacy transmit of command. Use transmit_command instead.", __func__);
  } else {
    fixed_queue_enqueue(packet_queue, data);
  }
}

// Start up functions

static void event_finish_startup(UNUSED_ATTR void *context) {
  LOG_INFO(LOG_TAG, "%s", __func__);
  hal->open();
  vendor->send_async_command(VENDOR_CONFIGURE_FIRMWARE, NULL);
}

static void firmware_config_callback(UNUSED_ATTR bool success) {
  LOG_INFO(LOG_TAG, "%s", __func__);
  firmware_is_configured = true;
  non_repeating_timer_cancel(startup_timer);

  future_ready(startup_future, FUTURE_SUCCESS);
  startup_future = NULL;
}

static void startup_timer_expired(UNUSED_ATTR void *context) {
  LOG_ERROR(LOG_TAG, "%s", __func__);
  future_ready(startup_future, FUTURE_FAIL);
  startup_future = NULL;
}

// Postload functions

static void event_postload(UNUSED_ATTR void *context) {
  LOG_INFO(LOG_TAG, "%s", __func__);
  if (vendor->send_async_command(VENDOR_CONFIGURE_SCO, NULL) == -1) {
    // If couldn't configure sco, we won't get the sco configuration callback
    // so go pretend to do it now
    sco_config_callback(false);
  }
}

static void sco_config_callback(UNUSED_ATTR bool success) {
  LOG_INFO(LOG_TAG, "%s postload finished.", __func__);
}

// Epilog functions

static void event_epilog(UNUSED_ATTR void *context) {
  vendor->send_async_command(VENDOR_DO_EPILOG, NULL);
}

static void epilog_finished_callback(UNUSED_ATTR bool success) {
  LOG_INFO(LOG_TAG, "%s", __func__);
  thread_stop(thread);
}

static void epilog_timer_expired(UNUSED_ATTR void *context) {
  LOG_INFO(LOG_TAG, "%s", __func__);
  thread_stop(thread);
}

// Command/packet transmitting functions

static void event_command_ready(fixed_queue_t *queue, UNUSED_ATTR void *context) {
  if (command_credits > 0) {
    waiting_command_t *wait_entry = fixed_queue_dequeue(queue);
    command_credits--;

    // Move it to the list of commands awaiting response
    pthread_mutex_lock(&commands_pending_response_lock);
    list_append(commands_pending_response, wait_entry);
    pthread_mutex_unlock(&commands_pending_response_lock);

    // Send it off
    low_power_manager->wake_assert();
    packet_fragmenter->fragment_and_dispatch(wait_entry->command);
    low_power_manager->transmit_done();

    non_repeating_timer_restart_if(command_response_timer, !list_is_empty(commands_pending_response));
  }
}

static void event_packet_ready(fixed_queue_t *queue, UNUSED_ATTR void *context) {
  // The queue may be the command queue or the packet queue, we don't care
  BT_HDR *packet = (BT_HDR *)fixed_queue_dequeue(queue);

  low_power_manager->wake_assert();
  packet_fragmenter->fragment_and_dispatch(packet);
  low_power_manager->transmit_done();
}

// Callback for the fragmenter to send a fragment
static void transmit_fragment(BT_HDR *packet, bool send_transmit_finished) {
  uint16_t event = packet->event & MSG_EVT_MASK;
  serial_data_type_t type = event_to_data_type(event);

  btsnoop->capture(packet, false);
  hal->transmit_data(type, packet->data + packet->offset, packet->len);

  if (event != MSG_STACK_TO_HC_HCI_CMD && send_transmit_finished)
    buffer_allocator->free(packet);
}
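// Note on transmit_fragment(): command packets are intentionally not freed
// here. The BT_HDR stays owned by its waiting_command_t entry on
// commands_pending_response and is released in filter_incoming_event once the
// matching Command Complete or Command Status event arrives (unless a status
// callback takes ownership of it).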

static void fragmenter_transmit_finished(BT_HDR *packet, bool all_fragments_sent) {
  if (all_fragments_sent) {
    buffer_allocator->free(packet);
  } else {
    // This is kind of a weird case, since we're dispatching a partially sent packet
    // up to a higher layer.
    // TODO(zachoverflow): rework upper layer so this isn't necessary.
    data_dispatcher_dispatch(interface.event_dispatcher, packet->event & MSG_EVT_MASK, packet);
  }
}

static void command_timed_out(UNUSED_ATTR void *context) {
  pthread_mutex_lock(&commands_pending_response_lock);

  if (list_is_empty(commands_pending_response)) {
    LOG_ERROR(LOG_TAG, "%s with no commands pending response", __func__);
  } else {
    waiting_command_t *wait_entry = list_front(commands_pending_response);
    pthread_mutex_unlock(&commands_pending_response_lock);

    // We shouldn't try to recover the stack from this command timeout.
    // If it's caused by a software bug, fix it. If it's a hardware bug, fix it.
    LOG_ERROR(LOG_TAG, "%s hci layer timeout waiting for response to a command. opcode: 0x%x", __func__, wait_entry->opcode);
  }

  LOG_ERROR(LOG_TAG, "%s restarting the bluetooth process.", __func__);
  usleep(10000);
  kill(getpid(), SIGKILL);
}

// Event/packet receiving functions

// This function is not required to read all of a packet in one go, so
// be wary of reentry. But this function must return after finishing a packet.
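//
// Receive state machine, kept per inbound packet type:
//   BRAND_NEW -> PREAMBLE -> BODY -> FINISHED -> BRAND_NEW,
// with IGNORE used to drain a packet whose buffer could not be allocated.
// Illustrative example for an HCI event: the hal reports the type separately,
// so the 2-byte preamble read here is the event code plus the parameter
// length, and that final length byte becomes bytes_remaining for the body.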
static void hal_says_data_ready(serial_data_type_t type) {
  packet_receive_data_t *incoming = &incoming_packets[PACKET_TYPE_TO_INBOUND_INDEX(type)];

  uint8_t byte;
  while (hal->read_data(type, &byte, 1, false) != 0) {
    switch (incoming->state) {
      case BRAND_NEW:
        // Initialize and prepare to jump to the preamble reading state
        incoming->bytes_remaining = preamble_sizes[PACKET_TYPE_TO_INDEX(type)];
        memset(incoming->preamble, 0, PREAMBLE_BUFFER_SIZE);
        incoming->index = 0;
        incoming->state = PREAMBLE;
        // INTENTIONAL FALLTHROUGH
      case PREAMBLE:
        incoming->preamble[incoming->index] = byte;
        incoming->index++;
        incoming->bytes_remaining--;

        if (incoming->bytes_remaining == 0) {
          // For event and sco preambles, the last byte we read is the length
          incoming->bytes_remaining = (type == DATA_TYPE_ACL) ? RETRIEVE_ACL_LENGTH(incoming->preamble) : byte;

          size_t buffer_size = BT_HDR_SIZE + incoming->index + incoming->bytes_remaining;
          incoming->buffer = (BT_HDR *)buffer_allocator->alloc(buffer_size);

          if (!incoming->buffer) {
            LOG_ERROR(LOG_TAG, "%s error getting buffer for incoming packet of type %d and size %zu", __func__, type, buffer_size);
            // Can't read any more of this current packet, so jump out
            incoming->state = incoming->bytes_remaining == 0 ? BRAND_NEW : IGNORE;
            break;
          }

          // Initialize the buffer
          incoming->buffer->offset = 0;
          incoming->buffer->layer_specific = 0;
          incoming->buffer->event = outbound_event_types[PACKET_TYPE_TO_INDEX(type)];
          memcpy(incoming->buffer->data, incoming->preamble, incoming->index);

          incoming->state = incoming->bytes_remaining > 0 ? BODY : FINISHED;
        }

        break;
      case BODY:
        incoming->buffer->data[incoming->index] = byte;
        incoming->index++;
        incoming->bytes_remaining--;

        size_t bytes_read = hal->read_data(type, (incoming->buffer->data + incoming->index), incoming->bytes_remaining, false);
        incoming->index += bytes_read;
        incoming->bytes_remaining -= bytes_read;

        incoming->state = incoming->bytes_remaining == 0 ? FINISHED : incoming->state;
        break;
      case IGNORE:
        incoming->bytes_remaining--;
        if (incoming->bytes_remaining == 0) {
          incoming->state = BRAND_NEW;
          // Don't forget to let the hal know we finished the packet we were ignoring.
          // Otherwise we'll get out of sync with hals that embed extra information
          // in the uart stream (like H4). #badnewsbears
          hal->packet_finished(type);
          return;
        }

        break;
      case FINISHED:
        LOG_ERROR(LOG_TAG, "%s the state machine should not have been left in the finished state.", __func__);
        break;
    }

    if (incoming->state == FINISHED) {
      incoming->buffer->len = incoming->index;
      btsnoop->capture(incoming->buffer, true);

      if (type != DATA_TYPE_EVENT) {
        packet_fragmenter->reassemble_and_dispatch(incoming->buffer);
      } else if (!filter_incoming_event(incoming->buffer)) {
        // Dispatch the event by event code
        uint8_t *stream = incoming->buffer->data;
        uint8_t event_code;
        STREAM_TO_UINT8(event_code, stream);

        data_dispatcher_dispatch(
          interface.event_dispatcher,
          event_code,
          incoming->buffer
        );
      }

      // We don't control the buffer anymore
      incoming->buffer = NULL;
      incoming->state = BRAND_NEW;
      hal->packet_finished(type);

      // We return after a packet is finished for two reasons:
      // 1. The type of the next packet could be different.
      // 2. We don't want to hog cpu time.
      return;
    }
  }
}

// Returns true if the event was intercepted and should not proceed to
// higher layers. Also inspects an incoming event for interesting
// information, like how many commands are now able to be sent.
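//
// Parameter layouts assumed by the parsing below (HCI spec, Vol 2, Part E):
//   Command Complete: Num_HCI_Command_Packets (1 byte), Command_Opcode
//                     (2 bytes, little-endian), return parameters...
//   Command Status:   Status (1), Num_HCI_Command_Packets (1),
//                     Command_Opcode (2, little-endian)
// Both follow the common event code and parameter length header bytes.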
static bool filter_incoming_event(BT_HDR *packet) {
  waiting_command_t *wait_entry = NULL;
  uint8_t *stream = packet->data;
  uint8_t event_code;
  command_opcode_t opcode;

  STREAM_TO_UINT8(event_code, stream);
  STREAM_SKIP_UINT8(stream); // Skip the parameter total length field

  if (event_code == HCI_COMMAND_COMPLETE_EVT) {
    STREAM_TO_UINT8(command_credits, stream);
    STREAM_TO_UINT16(opcode, stream);

    wait_entry = get_waiting_command(opcode);
    if (!wait_entry)
      LOG_WARN(LOG_TAG, "%s command complete event with no matching command. opcode: 0x%x.", __func__, opcode);
    else if (wait_entry->complete_callback)
      wait_entry->complete_callback(packet, wait_entry->context);
    else if (wait_entry->complete_future)
      future_ready(wait_entry->complete_future, packet);

    goto intercepted;
  } else if (event_code == HCI_COMMAND_STATUS_EVT) {
    uint8_t status;
    STREAM_TO_UINT8(status, stream);
    STREAM_TO_UINT8(command_credits, stream);
    STREAM_TO_UINT16(opcode, stream);

    // If a command generates a command status event, it won't be getting a command complete event

    wait_entry = get_waiting_command(opcode);
    if (!wait_entry)
      LOG_WARN(LOG_TAG, "%s command status event with no matching command. opcode: 0x%x", __func__, opcode);
    else if (wait_entry->status_callback)
      wait_entry->status_callback(status, wait_entry->command, wait_entry->context);

    goto intercepted;
  }

  return false;
intercepted:;
  non_repeating_timer_restart_if(command_response_timer, !list_is_empty(commands_pending_response));

  if (wait_entry) {
    // If it has a callback, it's responsible for freeing the packet
    if (event_code == HCI_COMMAND_STATUS_EVT || (!wait_entry->complete_callback && !wait_entry->complete_future))
      buffer_allocator->free(packet);

    // If it has a callback, it's responsible for freeing the command
    if (event_code == HCI_COMMAND_COMPLETE_EVT || !wait_entry->status_callback)
      buffer_allocator->free(wait_entry->command);

    osi_free(wait_entry);
  } else {
    buffer_allocator->free(packet);
  }

  return true;
}

// Callback for the fragmenter to dispatch up a completely reassembled packet
static void dispatch_reassembled(BT_HDR *packet) {
  // Events should already have been dispatched before this point
  assert((packet->event & MSG_EVT_MASK) != MSG_HC_TO_STACK_HCI_EVT);
  assert(upwards_data_queue != NULL);

  if (upwards_data_queue) {
    fixed_queue_enqueue(upwards_data_queue, packet);
  } else {
    LOG_ERROR(LOG_TAG, "%s had no queue to place upwards data packet in. Dropping it on the floor.", __func__);
    buffer_allocator->free(packet);
  }
}

// Misc internal functions

// TODO(zachoverflow): we seem to do this a couple places, like the HCI inject module. #centralize
static serial_data_type_t event_to_data_type(uint16_t event) {
  if (event == MSG_STACK_TO_HC_HCI_ACL)
    return DATA_TYPE_ACL;
  else if (event == MSG_STACK_TO_HC_HCI_SCO)
    return DATA_TYPE_SCO;
  else if (event == MSG_STACK_TO_HC_HCI_CMD)
    return DATA_TYPE_COMMAND;
  else
    LOG_ERROR(LOG_TAG, "%s invalid event type, could not translate 0x%x", __func__, event);

  return 0;
}

static waiting_command_t *get_waiting_command(command_opcode_t opcode) {
  pthread_mutex_lock(&commands_pending_response_lock);

  for (const list_node_t *node = list_begin(commands_pending_response);
      node != list_end(commands_pending_response);
      node = list_next(node)) {
    waiting_command_t *wait_entry = list_node(node);

    if (!wait_entry || wait_entry->opcode != opcode)
      continue;

    list_remove(commands_pending_response, wait_entry);

    pthread_mutex_unlock(&commands_pending_response_lock);
    return wait_entry;
  }

  pthread_mutex_unlock(&commands_pending_response_lock);
  return NULL;
}
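// Note on get_waiting_command(): the matching entry is unlinked from
// commands_pending_response while the lock is held, so ownership of the
// waiting_command_t (and its command buffer) passes to the caller;
// filter_incoming_event frees whatever its callbacks don't take over.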

static void init_layer_interface() {
  if (!interface_created) {
    interface.send_low_power_command = low_power_manager->post_command;
    interface.do_postload = do_postload;

    // It's probably ok for this to live forever. It's small and
    // there's only one instance of the hci interface.
    interface.event_dispatcher = data_dispatcher_new("hci_layer");
    if (!interface.event_dispatcher) {
      LOG_ERROR(LOG_TAG, "%s could not create upward dispatcher.", __func__);
      return;
    }

    interface.set_data_queue = set_data_queue;
    interface.transmit_command = transmit_command;
    interface.transmit_command_futured = transmit_command_futured;
    interface.transmit_downward = transmit_downward;
    interface_created = true;
  }
}

static const hci_hal_callbacks_t hal_callbacks = {
  hal_says_data_ready
};

static const packet_fragmenter_callbacks_t packet_fragmenter_callbacks = {
  transmit_fragment,
  dispatch_reassembled,
  fragmenter_transmit_finished
};

const hci_t *hci_layer_get_interface() {
  buffer_allocator = buffer_allocator_get_interface();
  hal = hci_hal_get_interface();
  btsnoop = btsnoop_get_interface();
  hci_inject = hci_inject_get_interface();
  packet_fragmenter = packet_fragmenter_get_interface();
  vendor = vendor_get_interface();
  low_power_manager = low_power_manager_get_interface();

  init_layer_interface();
  return &interface;
}

const hci_t *hci_layer_get_test_interface(
    const allocator_t *buffer_allocator_interface,
    const hci_hal_t *hal_interface,
    const btsnoop_t *btsnoop_interface,
    const hci_inject_t *hci_inject_interface,
    const packet_fragmenter_t *packet_fragmenter_interface,
    const vendor_t *vendor_interface,
    const low_power_manager_t *low_power_manager_interface) {

  buffer_allocator = buffer_allocator_interface;
  hal = hal_interface;
  btsnoop = btsnoop_interface;
  hci_inject = hci_inject_interface;
  packet_fragmenter = packet_fragmenter_interface;
  vendor = vendor_interface;
  low_power_manager = low_power_manager_interface;

  init_layer_interface();
  return &interface;
}