2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
30 #include <linux/rfkill.h>
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
35 #define AUTO_OFF_TIMEOUT 2000
37 static void hci_rx_work(struct work_struct *work);
38 static void hci_cmd_work(struct work_struct *work);
39 static void hci_tx_work(struct work_struct *work);
/* Global registry of all HCI devices, protected by hci_dev_list_lock. */
42 LIST_HEAD(hci_dev_list);
43 DEFINE_RWLOCK(hci_dev_list_lock);
45 /* HCI callback list */
46 LIST_HEAD(hci_cb_list);
47 DEFINE_RWLOCK(hci_cb_list_lock);
49 /* ---- HCI notifications ---- */
/* Forward a device state-change event to the HCI socket layer. */
51 static void hci_notify(struct hci_dev *hdev, int event)
53 hci_sock_dev_event(hdev, event);
56 /* ---- HCI requests ---- */
/* Complete a pending synchronous HCI request: record @result and wake the
 * waiter sleeping in __hci_request().  Also contains a workaround for CSR
 * controllers that emit a spontaneous reset-complete during init.
 * NOTE(review): this excerpt is missing interior lines (the embedded
 * numbers are the original file's line numbers); comments below describe
 * only what the visible lines establish.
 */
58 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
60 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
62 /* If this is the init phase check if the completed command matches
63 * the last init command, and if not just return.
65 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
66 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
67 u16 opcode = __le16_to_cpu(sent->opcode);
70 /* Some CSR based controllers generate a spontaneous
71 * reset complete event during init and any pending
72 * command will never be completed. In such a case we
73 * need to resend whatever was the last sent
77 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
/* Re-queue a clone of the last sent command and kick the cmd worker. */
80 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
82 skb_queue_head(&hdev->cmd_q, skb);
83 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Wake anyone blocked in __hci_request() waiting for this request. */
89 if (hdev->req_status == HCI_REQ_PEND) {
90 hdev->req_result = result;
91 hdev->req_status = HCI_REQ_DONE;
92 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with @err (used on device close). */
96 static void hci_req_cancel(struct hci_dev *hdev, int err)
98 BT_DBG("%s err 0x%2.2x", hdev->name, err);
100 if (hdev->req_status == HCI_REQ_PEND) {
101 hdev->req_result = err;
102 hdev->req_status = HCI_REQ_CANCELED;
103 wake_up_interruptible(&hdev->req_wait_q);
107 /* Execute request and wait for completion. */
/* Run @req (which sends HCI commands) and sleep interruptibly on
 * req_wait_q until hci_req_complete()/hci_req_cancel() fires or @timeout
 * (in jiffies) expires.  Caller must hold the request lock.
 */
108 static int __hci_request(struct hci_dev *hdev,
109 void (*req)(struct hci_dev *hdev, unsigned long opt),
110 unsigned long opt, __u32 timeout)
112 DECLARE_WAITQUEUE(wait, current);
115 BT_DBG("%s start", hdev->name);
117 hdev->req_status = HCI_REQ_PEND;
119 add_wait_queue(&hdev->req_wait_q, &wait);
120 set_current_state(TASK_INTERRUPTIBLE);
123 schedule_timeout(timeout);
125 remove_wait_queue(&hdev->req_wait_q, &wait);
/* Interrupted by a signal before completion. */
127 if (signal_pending(current))
/* Map the request outcome to an errno. */
130 switch (hdev->req_status) {
132 err = -bt_to_errno(hdev->req_result);
135 case HCI_REQ_CANCELED:
136 err = -hdev->req_result;
/* Reset request bookkeeping for the next caller. */
144 hdev->req_status = hdev->req_result = 0;
146 BT_DBG("%s end: err %d", hdev->name, err);
/* Locked wrapper around __hci_request(); fails unless the device is up. */
151 static int hci_request(struct hci_dev *hdev,
152 void (*req)(struct hci_dev *hdev, unsigned long opt),
153 unsigned long opt, __u32 timeout)
157 if (!test_bit(HCI_UP, &hdev->flags))
160 /* Serialize all requests */
162 ret = __hci_request(hdev, req, opt, timeout);
163 hci_req_unlock(hdev);
/* Request callback: send HCI_Reset and mark the reset in progress. */
168 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
170 BT_DBG("%s %ld", hdev->name, opt);
173 set_bit(HCI_RESET, &hdev->flags);
174 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
/* Queue the standard BR/EDR controller init command sequence. */
177 static void bredr_init(struct hci_dev *hdev)
179 struct hci_cp_delete_stored_link_key cp;
183 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
185 /* Mandatory initialization */
/* Only reset here if the driver doesn't already reset on close. */
188 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
193 /* Read Local Supported Features */
194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
196 /* Read Local Version */
197 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
199 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
200 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
202 /* Read BD Address */
203 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
205 /* Read Class of Device */
206 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
208 /* Read Local Name */
209 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
211 /* Read Voice Setting */
212 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
214 /* Optional initialization */
216 /* Clear Event Filters */
217 flt_type = HCI_FLT_CLEAR_ALL;
218 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
220 /* Connection accept timeout ~20 secs */
221 param = __constant_cpu_to_le16(0x7d00);
/* NOTE(review): "¶m" below is mojibake for "&param" (HTML entity
 * &para; leaked in); restore from the upstream source before building. */
222 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
224 bacpy(&cp.bdaddr, BDADDR_ANY);
226 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
/* Queue the init sequence for AMP (alternate MAC/PHY) controllers. */
229 static void amp_init(struct hci_dev *hdev)
231 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
234 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
236 /* Read Local Version */
237 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
239 /* Read Local AMP Info */
240 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
/* Request callback run at device open: flush driver-supplied init
 * commands into the command queue, then dispatch per-type init
 * (bredr_init/amp_init selected by hdev->dev_type — switch arms are
 * missing from this excerpt).
 */
243 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
247 BT_DBG("%s %ld", hdev->name, opt);
249 /* Driver initialization */
251 /* Special commands */
/* Move every driver-queued init command onto cmd_q as a command packet. */
252 while ((skb = skb_dequeue(&hdev->driver_init))) {
253 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
254 skb->dev = (void *) hdev;
256 skb_queue_tail(&hdev->cmd_q, skb);
257 queue_work(hdev->workqueue, &hdev->cmd_work);
259 skb_queue_purge(&hdev->driver_init);
261 switch (hdev->dev_type) {
271 BT_ERR("Unknown device type %d", hdev->dev_type);
/* Request callback: query the controller's LE ACL buffer parameters. */
277 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
279 BT_DBG("%s", hdev->name);
281 /* Read LE buffer size */
282 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
/* Request callback: write the scan-enable setting carried in @opt. */
285 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
289 BT_DBG("%s %x", hdev->name, scan);
291 /* Inquiry and Page scans */
292 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: write the authentication-enable setting from @opt. */
295 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
299 BT_DBG("%s %x", hdev->name, auth);
302 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: write the encryption-mode setting from @opt. */
305 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
309 BT_DBG("%s %x", hdev->name, encrypt);
312 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request callback: write the default link policy (little-endian u16). */
315 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
317 __le16 policy = cpu_to_le16(opt);
319 BT_DBG("%s %x", hdev->name, policy);
321 /* Default link policy */
322 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
325 /* Get HCI device by index.
326 * Device is held on return. */
/* Walks the global device list under the read lock; returns NULL if no
 * device has @index.  The matching device's refcount is taken via
 * hci_dev_hold(), so the caller must balance with hci_dev_put(). */
327 struct hci_dev *hci_dev_get(int index)
329 struct hci_dev *hdev = NULL, *d;
336 read_lock(&hci_dev_list_lock);
337 list_for_each_entry(d, &hci_dev_list, list) {
338 if (d->id == index) {
339 hdev = hci_dev_hold(d);
343 read_unlock(&hci_dev_list_lock);
347 /* ---- Inquiry support ---- */
/* True while a discovery procedure is in progress (finding devices or
 * resolving names). */
349 bool hci_discovery_active(struct hci_dev *hdev)
351 struct discovery_state *discov = &hdev->discovery;
353 switch (discov->state) {
354 case DISCOVERY_FINDING:
355 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges into STOPPED and FINDING. */
363 void hci_discovery_set_state(struct hci_dev *hdev, int state)
365 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
/* No-op if already in the requested state. */
367 if (hdev->discovery.state == state)
371 case DISCOVERY_STOPPED:
/* Only report "stopped" if discovery actually ran (not just STARTING). */
372 if (hdev->discovery.state != DISCOVERY_STARTING)
373 mgmt_discovering(hdev, 0);
375 case DISCOVERY_STARTING:
377 case DISCOVERY_FINDING:
378 mgmt_discovering(hdev, 1);
380 case DISCOVERY_RESOLVING:
382 case DISCOVERY_STOPPING:
386 hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reset its sub-lists. */
389 static void inquiry_cache_flush(struct hci_dev *hdev)
391 struct discovery_state *cache = &hdev->discovery;
392 struct inquiry_entry *p, *n;
394 list_for_each_entry_safe(p, n, &cache->all, all) {
399 INIT_LIST_HEAD(&cache->unknown);
400 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry-cache entry by BD address in the "all" list; NULL if
 * absent. */
403 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
406 struct discovery_state *cache = &hdev->discovery;
407 struct inquiry_entry *e;
409 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
411 list_for_each_entry(e, &cache->all, all) {
412 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Same lookup restricted to entries whose remote name is still unknown. */
419 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
422 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e;
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
427 list_for_each_entry(e, &cache->unknown, list) {
428 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Lookup in the name-resolve list: BDADDR_ANY matches the first entry in
 * the given name_state, otherwise match by exact BD address. */
435 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
439 struct discovery_state *cache = &hdev->discovery;
440 struct inquiry_entry *e;
442 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
444 list_for_each_entry(e, &cache->resolve, list) {
445 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
447 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list, kept ordered so that stronger
 * (smaller |RSSI|) non-pending entries are resolved first. */
454 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
455 struct inquiry_entry *ie)
457 struct discovery_state *cache = &hdev->discovery;
458 struct list_head *pos = &cache->resolve;
459 struct inquiry_entry *p;
463 list_for_each_entry(p, &cache->resolve, list) {
464 if (p->name_state != NAME_PENDING &&
465 abs(p->data.rssi) >= abs(ie->data.rssi))
470 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for @data.
 * @name_known: caller already knows the remote name for this result.
 * @ssp: out-parameter reporting whether the device advertises SSP mode.
 * NOTE(review): excerpt is lossy — the return statements establishing the
 * bool result are missing; the visible NAME_NOT_KNOWN check at the end
 * suggests it reports whether a name request is still needed — confirm
 * against upstream.
 */
473 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
474 bool name_known, bool *ssp)
476 struct discovery_state *cache = &hdev->discovery;
477 struct inquiry_entry *ie;
479 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
482 *ssp = data->ssp_mode;
484 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
486 if (ie->data.ssp_mode && ssp)
/* Existing entry awaiting a name: if RSSI changed, update it and
 * re-sort the resolve list. */
489 if (ie->name_state == NAME_NEEDED &&
490 data->rssi != ie->data.rssi) {
491 ie->data.rssi = data->rssi;
492 hci_inquiry_cache_update_resolve(hdev, ie);
498 /* Entry not in the cache. Add new one. */
499 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
503 list_add(&ie->all, &cache->all);
506 ie->name_state = NAME_KNOWN;
508 ie->name_state = NAME_NOT_KNOWN;
509 list_add(&ie->list, &cache->unknown);
/* Promote to NAME_KNOWN once the caller has learned the name. */
513 if (name_known && ie->name_state != NAME_KNOWN &&
514 ie->name_state != NAME_PENDING) {
515 ie->name_state = NAME_KNOWN;
519 memcpy(&ie->data, data, sizeof(*data));
520 ie->timestamp = jiffies;
521 cache->timestamp = jiffies;
523 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info; returns the number copied.  Must not sleep
 * (called under the device lock with a preallocated buffer). */
529 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
531 struct discovery_state *cache = &hdev->discovery;
532 struct inquiry_info *info = (struct inquiry_info *) buf;
533 struct inquiry_entry *e;
536 list_for_each_entry(e, &cache->all, all) {
537 struct inquiry_data *data = &e->data;
542 bacpy(&info->bdaddr, &data->bdaddr);
543 info->pscan_rep_mode = data->pscan_rep_mode;
544 info->pscan_period_mode = data->pscan_period_mode;
545 info->pscan_mode = data->pscan_mode;
546 memcpy(info->dev_class, data->dev_class, 3);
547 info->clock_offset = data->clock_offset;
553 BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: start an inquiry with the LAP/length/num_rsp packed
 * into @opt as a struct hci_inquiry_req pointer; no-op if an inquiry is
 * already running. */
557 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
559 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
560 struct hci_cp_inquiry cp;
562 BT_DBG("%s", hdev->name);
564 if (test_bit(HCI_INQUIRY, &hdev->flags))
568 memcpy(&cp.lap, &ir->lap, 3);
569 cp.length = ir->length;
570 cp.num_rsp = ir->num_rsp;
571 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: run (or reuse a fresh cache for) an inquiry
 * and copy the results back to user space.
 * @arg: user pointer to a struct hci_inquiry_req followed by the result
 * buffer.  Returns 0 or a negative errno.
 */
574 int hci_inquiry(void __user *arg)
576 __u8 __user *ptr = arg;
577 struct hci_inquiry_req ir;
578 struct hci_dev *hdev;
579 int err = 0, do_inquiry = 0, max_rsp;
583 if (copy_from_user(&ir, ptr, sizeof(ir)))
586 hdev = hci_dev_get(ir.dev_id);
/* Flush and re-inquire when the cache is stale/empty or flush is forced. */
591 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
592 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
593 inquiry_cache_flush(hdev);
596 hci_dev_unlock(hdev);
/* ir.length is in 1.28 s units upstream; here scaled via 2000 ms. */
598 timeo = ir.length * msecs_to_jiffies(2000);
601 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
606 /* for unlimited number of responses we will use buffer with
609 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
611 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
612 * copy it to the user space.
614 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
621 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
622 hci_dev_unlock(hdev);
624 BT_DBG("num_rsp %d", ir.num_rsp);
/* Write back the updated request header, then the result array. */
626 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
628 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
641 /* ---- HCI ioctl helpers ---- */
/* Bring up HCI device @dev: open the transport, run the init request
 * sequence (unless the device is raw), and mark it HCI_UP.  On init
 * failure the work items and queues are torn down again.
 * Returns 0 or a negative errno.
 */
643 int hci_dev_open(__u16 dev)
645 struct hci_dev *hdev;
648 hdev = hci_dev_get(dev);
652 BT_DBG("%s %p", hdev->name, hdev);
/* Refuse devices that are being unregistered or are rfkill-blocked. */
656 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
661 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
666 if (test_bit(HCI_UP, &hdev->flags)) {
671 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
672 set_bit(HCI_RAW, &hdev->flags);
674 /* Treat all non BR/EDR controllers as raw devices if
675 enable_hs is not set */
676 if (hdev->dev_type != HCI_BREDR && !enable_hs)
677 set_bit(HCI_RAW, &hdev->flags);
/* Driver transport open; failure aborts before any init is attempted. */
679 if (hdev->open(hdev)) {
/* Non-raw devices get the full HCI init (plus LE init if supported). */
684 if (!test_bit(HCI_RAW, &hdev->flags)) {
685 atomic_set(&hdev->cmd_cnt, 1);
686 set_bit(HCI_INIT, &hdev->flags);
687 hdev->init_last_cmd = 0;
689 ret = __hci_request(hdev, hci_init_req, 0,
690 msecs_to_jiffies(HCI_INIT_TIMEOUT));
692 if (lmp_host_le_capable(hdev))
693 ret = __hci_request(hdev, hci_le_init_req, 0,
694 msecs_to_jiffies(HCI_INIT_TIMEOUT));
696 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and report power-on over mgmt. */
701 set_bit(HCI_UP, &hdev->flags);
702 hci_notify(hdev, HCI_DEV_UP);
703 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
705 mgmt_powered(hdev, 1);
706 hci_dev_unlock(hdev);
709 /* Init failed, cleanup */
710 flush_work(&hdev->tx_work);
711 flush_work(&hdev->cmd_work);
712 flush_work(&hdev->rx_work);
714 skb_queue_purge(&hdev->cmd_q);
715 skb_queue_purge(&hdev->rx_q);
720 if (hdev->sent_cmd) {
721 kfree_skb(hdev->sent_cmd);
722 hdev->sent_cmd = NULL;
730 hci_req_unlock(hdev);
/* Fully shut down @hdev: cancel pending work and requests, flush all
 * queues, optionally send a final HCI_Reset (RESET_ON_CLOSE quirk), and
 * report power-off over mgmt.  Idempotent: returns early if the device
 * is already down.
 */
735 static int hci_dev_do_close(struct hci_dev *hdev)
737 BT_DBG("%s %p", hdev->name, hdev);
739 cancel_work_sync(&hdev->le_scan);
/* Abort any synchronous request still sleeping in __hci_request(). */
741 hci_req_cancel(hdev, ENODEV);
744 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
745 del_timer_sync(&hdev->cmd_timer);
746 hci_req_unlock(hdev);
750 /* Flush RX and TX works */
751 flush_work(&hdev->tx_work);
752 flush_work(&hdev->rx_work);
/* Stop a pending discoverable timeout and clear the flag. */
754 if (hdev->discov_timeout > 0) {
755 cancel_delayed_work(&hdev->discov_off);
756 hdev->discov_timeout = 0;
757 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
760 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
761 cancel_delayed_work(&hdev->service_cache);
763 cancel_delayed_work_sync(&hdev->le_scan_disable);
766 inquiry_cache_flush(hdev);
767 hci_conn_hash_flush(hdev);
768 hci_dev_unlock(hdev);
770 hci_notify(hdev, HCI_DEV_DOWN);
776 skb_queue_purge(&hdev->cmd_q);
777 atomic_set(&hdev->cmd_cnt, 1);
/* Send a final reset if required by the controller quirk. */
778 if (!test_bit(HCI_RAW, &hdev->flags) &&
779 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
780 set_bit(HCI_INIT, &hdev->flags);
781 __hci_request(hdev, hci_reset_req, 0,
782 msecs_to_jiffies(250));
783 clear_bit(HCI_INIT, &hdev->flags);
787 flush_work(&hdev->cmd_work);
790 skb_queue_purge(&hdev->rx_q);
791 skb_queue_purge(&hdev->cmd_q);
792 skb_queue_purge(&hdev->raw_q);
794 /* Drop last sent command */
795 if (hdev->sent_cmd) {
796 del_timer_sync(&hdev->cmd_timer);
797 kfree_skb(hdev->sent_cmd);
798 hdev->sent_cmd = NULL;
801 /* After this point our queues are empty
802 * and no tasks are scheduled. */
/* Only report power-off to mgmt if this wasn't an auto-off shutdown. */
805 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
807 mgmt_powered(hdev, 0);
808 hci_dev_unlock(hdev);
/* Clear controller-derived state (EIR data, class of device). */
814 memset(hdev->eir, 0, sizeof(hdev->eir));
815 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
817 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl backend: cancel any pending auto-power-off and close
 * the device via hci_dev_do_close(). */
823 int hci_dev_close(__u16 dev)
825 struct hci_dev *hdev;
828 hdev = hci_dev_get(dev);
832 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
833 cancel_delayed_work(&hdev->power_off);
835 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl backend: drop queued traffic and cached state, reset
 * flow-control counters, and (for non-raw devices) issue HCI_Reset. */
841 int hci_dev_reset(__u16 dev)
843 struct hci_dev *hdev;
846 hdev = hci_dev_get(dev);
852 if (!test_bit(HCI_UP, &hdev->flags))
856 skb_queue_purge(&hdev->rx_q);
857 skb_queue_purge(&hdev->cmd_q);
860 inquiry_cache_flush(hdev);
861 hci_conn_hash_flush(hdev);
862 hci_dev_unlock(hdev);
867 atomic_set(&hdev->cmd_cnt, 1);
868 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
870 if (!test_bit(HCI_RAW, &hdev->flags))
871 ret = __hci_request(hdev, hci_reset_req, 0,
872 msecs_to_jiffies(HCI_INIT_TIMEOUT));
875 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl backend: zero the device's traffic statistics. */
880 int hci_dev_reset_stat(__u16 dev)
882 struct hci_dev *hdev;
885 hdev = hci_dev_get(dev);
889 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the per-device HCI ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE, HCISETACLMTU,
 * HCISETSCOMTU).  @arg points to a struct hci_dev_req in user space.
 * NOTE(review): the case labels themselves are missing from this lossy
 * excerpt; the bodies below are in upstream case order.
 */
896 int hci_dev_cmd(unsigned int cmd, void __user *arg)
898 struct hci_dev *hdev;
899 struct hci_dev_req dr;
902 if (copy_from_user(&dr, arg, sizeof(dr)))
905 hdev = hci_dev_get(dr.dev_id);
911 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
912 msecs_to_jiffies(HCI_INIT_TIMEOUT));
916 if (!lmp_encrypt_capable(hdev)) {
921 if (!test_bit(HCI_AUTH, &hdev->flags)) {
922 /* Auth must be enabled first */
923 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
924 msecs_to_jiffies(HCI_INIT_TIMEOUT));
929 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
930 msecs_to_jiffies(HCI_INIT_TIMEOUT));
934 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
935 msecs_to_jiffies(HCI_INIT_TIMEOUT));
939 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
940 msecs_to_jiffies(HCI_INIT_TIMEOUT));
/* Settings below are stored locally; no HCI command is sent. */
944 hdev->link_mode = ((__u16) dr.dev_opt) &
945 (HCI_LM_MASTER | HCI_LM_ACCEPT);
949 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs two u16s: [0] = packet count, [1] = MTU. */
953 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
954 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
958 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
959 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl backend: fill a user-supplied struct
 * hci_dev_list_req with (id, flags) pairs for every registered device.
 * The first __u16 at @arg is the caller's capacity. */
971 int hci_get_dev_list(void __user *arg)
973 struct hci_dev *hdev;
974 struct hci_dev_list_req *dl;
975 struct hci_dev_req *dr;
976 int n = 0, size, err;
979 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the allocation: at most two pages worth of entries. */
982 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
985 size = sizeof(*dl) + dev_num * sizeof(*dr);
987 dl = kzalloc(size, GFP_KERNEL);
993 read_lock(&hci_dev_list_lock);
994 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Listing a device keeps it powered: cancel the pending auto-off. */
995 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
996 cancel_delayed_work(&hdev->power_off);
998 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
999 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1001 (dr + n)->dev_id = hdev->id;
1002 (dr + n)->dev_opt = hdev->flags;
1007 read_unlock(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found. */
1010 size = sizeof(*dl) + n * sizeof(*dr);
1012 err = copy_to_user(arg, dl, size);
1015 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for one
 * device identified by di.dev_id. */
1018 int hci_get_dev_info(void __user *arg)
1020 struct hci_dev *hdev;
1021 struct hci_dev_info di;
1024 if (copy_from_user(&di, arg, sizeof(di)))
1027 hdev = hci_dev_get(di.dev_id);
1031 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1032 cancel_delayed_work_sync(&hdev->power_off);
1034 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1035 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1037 strcpy(di.name, hdev->name);
1038 di.bdaddr = hdev->bdaddr;
/* Pack bus type in the low nibble, device type in the next. */
1039 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1040 di.flags = hdev->flags;
1041 di.pkt_type = hdev->pkt_type;
1042 di.acl_mtu = hdev->acl_mtu;
1043 di.acl_pkts = hdev->acl_pkts;
1044 di.sco_mtu = hdev->sco_mtu;
1045 di.sco_pkts = hdev->sco_pkts;
1046 di.link_policy = hdev->link_policy;
1047 di.link_mode = hdev->link_mode;
1049 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1050 memcpy(&di.features, &hdev->features, sizeof(di.features));
1052 if (copy_to_user(arg, &di, sizeof(di)))
1060 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: close the device when the radio is blocked. */
1062 static int hci_rfkill_set_block(void *data, bool blocked)
1064 struct hci_dev *hdev = data;
1066 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1071 hci_dev_do_close(hdev);
1076 static const struct rfkill_ops hci_rfkill_ops = {
1077 .set_block = hci_rfkill_set_block,
/* Work item: power the device on; if it was auto-powered (HCI_AUTO_OFF),
 * schedule the delayed power-off, and signal mgmt once setup finishes. */
1080 static void hci_power_on(struct work_struct *work)
1082 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1084 BT_DBG("%s", hdev->name);
1086 if (hci_dev_open(hdev->id) < 0)
1089 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1090 schedule_delayed_work(&hdev->power_off,
1091 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1093 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1094 mgmt_index_added(hdev);
/* Delayed-work item: power the device off. */
1097 static void hci_power_off(struct work_struct *work)
1099 struct hci_dev *hdev = container_of(work, struct hci_dev,
1102 BT_DBG("%s", hdev->name);
1104 hci_dev_do_close(hdev);
/* Delayed-work item: discoverable timeout expired — fall back to page
 * scan only and clear the stored timeout. */
1107 static void hci_discov_off(struct work_struct *work)
1109 struct hci_dev *hdev;
1110 u8 scan = SCAN_PAGE;
1112 hdev = container_of(work, struct hci_dev, discov_off.work);
1114 BT_DBG("%s", hdev->name);
1118 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1120 hdev->discov_timeout = 0;
1122 hci_dev_unlock(hdev);
/* Free every registered service UUID on @hdev. */
1125 int hci_uuids_clear(struct hci_dev *hdev)
1127 struct list_head *p, *n;
1129 list_for_each_safe(p, n, &hdev->uuids) {
1130 struct bt_uuid *uuid;
1132 uuid = list_entry(p, struct bt_uuid, list);
/* Free every stored BR/EDR link key. */
1141 int hci_link_keys_clear(struct hci_dev *hdev)
1143 struct list_head *p, *n;
1145 list_for_each_safe(p, n, &hdev->link_keys) {
1146 struct link_key *key;
1148 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long-term key. */
1157 int hci_smp_ltks_clear(struct hci_dev *hdev)
1159 struct smp_ltk *k, *tmp;
1161 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up a stored link key by BD address; NULL if none. */
1169 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1173 list_for_each_entry(k, &hdev->link_keys, list)
1174 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on key type and the bonding requirements of both sides. */
1180 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1181 u8 key_type, u8 old_key_type)
/* Legacy key types (< combination) are never persistent. */
1184 if (key_type < 0x03)
1187 /* Debug keys are insecure so don't store them persistently */
1188 if (key_type == HCI_LK_DEBUG_COMBINATION)
1191 /* Changed combination key and there's no previous one */
1192 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1195 /* Security mode 3 case */
1199 /* Neither local nor remote side had no-bonding as requirement */
1200 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1203 /* Local side had dedicated bonding as requirement */
1204 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1207 /* Remote side had dedicated bonding as requirement */
1208 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1211 /* If none of the above criteria match, then don't store the key
/* Look up an LTK by EDIV and 8-byte Rand value; NULL if none. */
1216 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1220 list_for_each_entry(k, &hdev->long_term_keys, list) {
1221 if (k->ediv != ediv ||
1222 memcmp(rand, k->rand, sizeof(k->rand)))
/* Look up an LTK by peer address and address type; NULL if none. */
1231 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1236 list_for_each_entry(k, &hdev->long_term_keys, list)
1237 if (addr_type == k->bdaddr_type &&
1238 bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or refresh) a BR/EDR link key for @bdaddr.  When @new_key is
 * set, the key is reported to mgmt with its persistence decided by
 * hci_persistent_key(); non-persistent keys mark the connection for
 * key flushing. */
1244 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1245 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1247 struct link_key *key, *old_key;
1251 old_key = hci_find_link_key(hdev, bdaddr);
1253 old_key_type = old_key->type;
/* No existing key: allocate a fresh entry (0xff = no previous type). */
1256 old_key_type = conn ? conn->key_type : 0xff;
1257 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1260 list_add(&key->list, &hdev->link_keys);
1263 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1265 /* Some buggy controller combinations generate a changed
1266 * combination key for legacy pairing even when there's no
1268 if (type == HCI_LK_CHANGED_COMBINATION &&
1269 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1270 type = HCI_LK_COMBINATION;
1272 conn->key_type = type;
1275 bacpy(&key->bdaddr, bdaddr);
1276 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1277 key->pin_len = pin_len;
/* A changed-combination event keeps the previous key's type. */
1279 if (type == HCI_LK_CHANGED_COMBINATION)
1280 key->type = old_key_type;
1287 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1289 mgmt_new_link_key(hdev, key, persistent);
1292 conn->flush_key = !persistent;
/* Store (or refresh) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 * Only actual LTKs (not STKs) are reported to mgmt when @new_key. */
1297 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1298 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1301 struct smp_ltk *key, *old_key;
1303 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1306 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1310 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1313 list_add(&key->list, &hdev->long_term_keys);
1316 bacpy(&key->bdaddr, bdaddr);
1317 key->bdaddr_type = addr_type;
1318 memcpy(key->val, tk, sizeof(key->val));
1319 key->authenticated = authenticated;
1321 key->enc_size = enc_size;
1323 memcpy(key->rand, rand, sizeof(key->rand));
1328 if (type & HCI_SMP_LTK)
1329 mgmt_new_ltk(hdev, key, 1);
/* Delete the stored link key for @bdaddr, if any. */
1334 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336 struct link_key *key;
1338 key = hci_find_link_key(hdev, bdaddr);
1342 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1344 list_del(&key->list);
/* Delete all stored LTKs matching @bdaddr. */
1350 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1352 struct smp_ltk *k, *tmp;
1354 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1355 if (bacmp(bdaddr, &k->bdaddr))
1358 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1367 /* HCI command timer function */
/* Fires when a sent command gets no completion in time: log, restore the
 * command credit, and kick the command worker to retry/continue. */
1368 static void hci_cmd_timer(unsigned long arg)
1370 struct hci_dev *hdev = (void *) arg;
1372 BT_ERR("%s command tx timeout", hdev->name);
1373 atomic_set(&hdev->cmd_cnt, 1);
1374 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by BD address; NULL if none. */
1377 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1380 struct oob_data *data;
1382 list_for_each_entry(data, &hdev->remote_oob_data, list)
1383 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the stored remote OOB data for @bdaddr, if any. */
1389 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1391 struct oob_data *data;
1393 data = hci_find_remote_oob_data(hdev, bdaddr);
1397 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1399 list_del(&data->list);
/* Free every stored remote OOB data entry. */
1405 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1407 struct oob_data *data, *n;
1409 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1410 list_del(&data->list);
/* Store (or overwrite) remote OOB hash + randomizer for @bdaddr. */
1417 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1420 struct oob_data *data;
1422 data = hci_find_remote_oob_data(hdev, bdaddr);
1425 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1429 bacpy(&data->bdaddr, bdaddr);
1430 list_add(&data->list, &hdev->remote_oob_data);
1433 memcpy(data->hash, hash, sizeof(data->hash));
1434 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1436 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
/* Look up a blacklist entry by BD address; NULL if not blacklisted. */
1441 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1443 struct bdaddr_list *b;
1445 list_for_each_entry(b, &hdev->blacklist, list)
1446 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Free every blacklist entry on @hdev. */
1452 int hci_blacklist_clear(struct hci_dev *hdev)
1454 struct list_head *p, *n;
1456 list_for_each_safe(p, n, &hdev->blacklist) {
1457 struct bdaddr_list *b;
1459 b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr to the blacklist (rejecting BDADDR_ANY and duplicates)
 * and report the block over mgmt. */
1468 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1470 struct bdaddr_list *entry;
1472 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1475 if (hci_blacklist_lookup(hdev, bdaddr))
1478 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1482 bacpy(&entry->bdaddr, bdaddr);
1484 list_add(&entry->list, &hdev->blacklist);
1486 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove @bdaddr from the blacklist (BDADDR_ANY clears the whole list)
 * and report the unblock over mgmt. */
1489 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1491 struct bdaddr_list *entry;
1493 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1494 return hci_blacklist_clear(hdev);
1496 entry = hci_blacklist_lookup(hdev, bdaddr);
1500 list_del(&entry->list);
1503 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request callback: program LE scan type/interval/window from the
 * struct le_scan_params smuggled through @opt. */
1506 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1508 struct le_scan_params *param = (struct le_scan_params *) opt;
1509 struct hci_cp_le_set_scan_param cp;
1511 memset(&cp, 0, sizeof(cp));
1512 cp.type = param->type;
1513 cp.interval = cpu_to_le16(param->interval);
1514 cp.window = cpu_to_le16(param->window);
1516 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
/* Request callback: send LE Set Scan Enable (the enable value is set on
 * a line missing from this excerpt). */
1519 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1521 struct hci_cp_le_set_scan_enable cp;
1523 memset(&cp, 0, sizeof(cp));
1526 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Synchronously start an LE scan: set parameters, enable scanning, then
 * arm the delayed work that stops the scan after @timeout ms.
 * Returns -EINPROGRESS if a scan is already running. */
1529 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1530 u16 window, int timeout)
1532 long timeo = msecs_to_jiffies(3000);
1533 struct le_scan_params param;
1536 BT_DBG("%s", hdev->name);
1538 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1539 return -EINPROGRESS;
1542 param.interval = interval;
1543 param.window = window;
/* NOTE(review): "¶m" below is mojibake for "&param" (HTML entity
 * &para; leaked in); restore from the upstream source before building. */
1547 err = __hci_request(hdev, le_scan_param_req, (unsigned long) ¶m,
1550 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1552 hci_req_unlock(hdev);
1557 schedule_delayed_work(&hdev->le_scan_disable,
1558 msecs_to_jiffies(timeout));
/* Cancel a running LE scan: if the disable work was still pending,
 * send the disable command directly. */
1563 int hci_cancel_le_scan(struct hci_dev *hdev)
1565 BT_DBG("%s", hdev->name);
1567 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1570 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1571 struct hci_cp_le_set_scan_enable cp;
1573 /* Send HCI command to disable LE Scan */
1574 memset(&cp, 0, sizeof(cp));
1575 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed-work item: the scan timeout expired — disable LE scanning. */
1581 static void le_scan_disable_work(struct work_struct *work)
1583 struct hci_dev *hdev = container_of(work, struct hci_dev,
1584 le_scan_disable.work);
1585 struct hci_cp_le_set_scan_enable cp;
1587 BT_DBG("%s", hdev->name);
1589 memset(&cp, 0, sizeof(cp));
1591 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Work item: run the LE scan with the parameters stashed in
 * hdev->le_scan_params by hci_le_scan(). */
1594 static void le_scan_work(struct work_struct *work)
1596 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1597 struct le_scan_params *param = &hdev->le_scan_params;
1599 BT_DBG("%s", hdev->name);
1601 hci_do_le_scan(hdev, param->type, param->interval, param->window,
/* Public entry point: record scan parameters and queue le_scan_work on
 * the system long workqueue; -EINPROGRESS if a scan is already queued. */
1605 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1608 struct le_scan_params *param = &hdev->le_scan_params;
1610 BT_DBG("%s", hdev->name);
1612 if (work_busy(&hdev->le_scan))
1613 return -EINPROGRESS;
1616 param->interval = interval;
1617 param->window = window;
1618 param->timeout = timeout;
1620 queue_work(system_long_wq, &hdev->le_scan);
1625 /* Alloc HCI device */
/* Allocate and initialize a new hci_dev: zero-allocated, with default
 * packet types, locks, lists, work items, skb queues and the command
 * timer all set up.  The caller later registers it with
 * hci_register_dev() and releases it with hci_free_dev(). */
1626 struct hci_dev *hci_alloc_dev(void)
1628 struct hci_dev *hdev;
1630 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
/* Conservative defaults: basic-rate packet types only. */
1634 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1635 hdev->esco_type = (ESCO_HV1);
1636 hdev->link_mode = (HCI_LM_ACCEPT);
1637 hdev->io_capability = 0x03; /* No Input No Output */
/* Sniff intervals in baseband slots (0.625 ms units). */
1639 hdev->sniff_max_interval = 800;
1640 hdev->sniff_min_interval = 80;
1642 mutex_init(&hdev->lock);
1643 mutex_init(&hdev->req_lock);
1645 INIT_LIST_HEAD(&hdev->mgmt_pending);
1646 INIT_LIST_HEAD(&hdev->blacklist);
1647 INIT_LIST_HEAD(&hdev->uuids);
1648 INIT_LIST_HEAD(&hdev->link_keys);
1649 INIT_LIST_HEAD(&hdev->long_term_keys);
1650 INIT_LIST_HEAD(&hdev->remote_oob_data);
/* Work handlers for the RX/CMD/TX paths and power management. */
1652 INIT_WORK(&hdev->rx_work, hci_rx_work);
1653 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1654 INIT_WORK(&hdev->tx_work, hci_tx_work);
1655 INIT_WORK(&hdev->power_on, hci_power_on);
1656 INIT_WORK(&hdev->le_scan, le_scan_work);
1658 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1659 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1660 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1662 skb_queue_head_init(&hdev->driver_init);
1663 skb_queue_head_init(&hdev->rx_q);
1664 skb_queue_head_init(&hdev->cmd_q);
1665 skb_queue_head_init(&hdev->raw_q);
1667 init_waitqueue_head(&hdev->req_wait_q);
/* Command timeout timer; hci_cmd_timer gets hdev as its argument. */
1669 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1671 hci_init_sysfs(hdev);
1672 discovery_init(hdev);
1673 hci_conn_hash_init(hdev);
1677 EXPORT_SYMBOL(hci_alloc_dev);
1679 /* Free HCI device */
/* Drop the caller's reference to an hci_dev.  Purges the driver_init
 * queue; the actual memory is freed by the device core's release
 * callback when the last reference goes away. */
1680 void hci_free_dev(struct hci_dev *hdev)
1682 skb_queue_purge(&hdev->driver_init);
1684 /* will free via device release */
1685 put_device(&hdev->dev);
1687 EXPORT_SYMBOL(hci_free_dev);
1689 /* Register HCI device */
/* Register a driver-provided hci_dev with the core: picks the first
 * free "hciN" id, links it into hci_dev_list, creates its per-device
 * workqueue and sysfs entries, sets up rfkill, and kicks off the
 * power-on (auto-off/setup) sequence.  Error-unwind lines are partly
 * elided in this view; the tail unwinds the workqueue and list entry. */
1690 int hci_register_dev(struct hci_dev *hdev)
1692 struct list_head *head, *p;
/* A usable driver must provide at least open and close callbacks. */
1695 if (!hdev->open || !hdev->close)
1698 write_lock(&hci_dev_list_lock);
1700 /* Do not allow HCI_AMP devices to register at index 0,
1701 * so the index can be used as the AMP controller ID.
/* BR/EDR controllers start searching at id 0, AMP controllers at 1. */
1703 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1704 head = &hci_dev_list;
1706 /* Find first available device id */
1707 list_for_each(p, &hci_dev_list) {
1708 int nid = list_entry(p, struct hci_dev, list)->id;
1716 sprintf(hdev->name, "hci%d", id);
1719 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1721 list_add(&hdev->list, head);
1723 write_unlock(&hci_dev_list_lock);
/* Per-device ordered workqueue for rx/tx/cmd work. */
1725 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1727 if (!hdev->workqueue) {
1732 error = hci_add_sysfs(hdev);
1736 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1737 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
/* rfkill registration failure is non-fatal: run without rfkill. */
1740 if (rfkill_register(hdev->rfkill) < 0) {
1741 rfkill_destroy(hdev->rfkill);
1742 hdev->rfkill = NULL;
/* New devices come up in SETUP with auto-power-off armed. */
1746 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1747 set_bit(HCI_SETUP, &hdev->dev_flags);
1748 schedule_work(&hdev->power_on);
1750 hci_notify(hdev, HCI_DEV_REG);
/* Error unwind: destroy workqueue and unlink from the device list. */
1756 destroy_workqueue(hdev->workqueue);
1758 write_lock(&hci_dev_list_lock);
1759 list_del(&hdev->list);
1760 write_unlock(&hci_dev_list_lock);
1764 EXPORT_SYMBOL(hci_register_dev);
1766 /* Unregister HCI device */
/* Tear down a registered hci_dev: mark it unregistering, unlink it
 * from the global list, close it, free reassembly buffers, notify
 * mgmt (unless still in INIT/SETUP), unregister rfkill and sysfs,
 * destroy the workqueue and clear all stored keys/UUIDs/OOB data. */
1767 void hci_unregister_dev(struct hci_dev *hdev)
1771 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Flag first so concurrent paths can see the device is going away. */
1773 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1775 write_lock(&hci_dev_list_lock);
1776 list_del(&hdev->list);
1777 write_unlock(&hci_dev_list_lock);
1779 hci_dev_do_close(hdev);
/* Free any partially reassembled frames. */
1781 for (i = 0; i < NUM_REASSEMBLY; i++)
1782 kfree_skb(hdev->reassembly[i]);
/* Only tell mgmt about removal if the device was fully set up. */
1784 if (!test_bit(HCI_INIT, &hdev->flags) &&
1785 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1787 mgmt_index_removed(hdev);
1788 hci_dev_unlock(hdev);
1791 /* mgmt_index_removed should take care of emptying the
1793 BUG_ON(!list_empty(&hdev->mgmt_pending));
1795 hci_notify(hdev, HCI_DEV_UNREG);
1798 rfkill_unregister(hdev->rfkill);
1799 rfkill_destroy(hdev->rfkill);
1802 hci_del_sysfs(hdev);
1804 destroy_workqueue(hdev->workqueue);
/* Drop all persistent per-device state under the dev lock. */
1807 hci_blacklist_clear(hdev);
1808 hci_uuids_clear(hdev);
1809 hci_link_keys_clear(hdev);
1810 hci_smp_ltks_clear(hdev);
1811 hci_remote_oob_data_clear(hdev);
1812 hci_dev_unlock(hdev);
1816 EXPORT_SYMBOL(hci_unregister_dev);
1818 /* Suspend HCI device */
/* Driver hook: broadcast a suspend notification for this device. */
1819 int hci_suspend_dev(struct hci_dev *hdev)
1821 hci_notify(hdev, HCI_DEV_SUSPEND);
1824 EXPORT_SYMBOL(hci_suspend_dev);
1826 /* Resume HCI device */
/* Driver hook: broadcast a resume notification for this device. */
1827 int hci_resume_dev(struct hci_dev *hdev)
1829 hci_notify(hdev, HCI_DEV_RESUME);
1832 EXPORT_SYMBOL(hci_resume_dev);
1834 /* Receive frame from HCI drivers */
/* Entry point for complete frames coming up from a driver.  The
 * driver stashes its hci_dev in skb->dev.  Frames are only accepted
 * while the device is UP or in INIT; accepted frames are timestamped,
 * marked incoming and queued to rx_q for hci_rx_work. */
1835 int hci_recv_frame(struct sk_buff *skb)
1837 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1838 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1839 && !test_bit(HCI_INIT, &hdev->flags))) {
1845 bt_cb(skb)->incoming = 1;
1848 /* Time stamp */
1848 __net_timestamp(skb);
/* Defer all processing to the per-device workqueue. */
1850 skb_queue_tail(&hdev->rx_q, skb);
1851 queue_work(hdev->workqueue, &hdev->rx_work);
1855 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble an HCI packet of the given type from a
 * fragment buffer.  State is kept per-index in hdev->reassembly[].
 * A new skb is allocated (sized for the packet type's maximum) when
 * no reassembly is in progress; once the header arrives, scb->expect
 * is set from the header's payload-length field, and when it reaches
 * zero the completed frame is handed to hci_recv_frame().
 * NOTE(review): several control-flow lines (switch heads, loop, error
 * returns) are elided in this view. */
1857 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1858 int count, __u8 index)
1863 struct sk_buff *skb;
1864 struct bt_skb_cb *scb;
/* Reject unknown packet types and out-of-range reassembly slots. */
1866 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1867 index >= NUM_REASSEMBLY)
1870 skb = hdev->reassembly[index];
/* Pick maximum frame size and header length per packet type. */
1874 case HCI_ACLDATA_PKT:
1875 len = HCI_MAX_FRAME_SIZE;
1876 hlen = HCI_ACL_HDR_SIZE;
1879 len = HCI_MAX_EVENT_SIZE;
1880 hlen = HCI_EVENT_HDR_SIZE;
1882 case HCI_SCODATA_PKT:
1883 len = HCI_MAX_SCO_SIZE;
1884 hlen = HCI_SCO_HDR_SIZE;
/* No reassembly in progress: start a fresh skb for this slot. */
1888 skb = bt_skb_alloc(len, GFP_ATOMIC);
1892 scb = (void *) skb->cb;
1894 scb->pkt_type = type;
1896 skb->dev = (void *) hdev;
1897 hdev->reassembly[index] = skb;
/* Copy as much of the input as this fragment still expects. */
1901 scb = (void *) skb->cb;
1902 len = min_t(uint, scb->expect, count);
1904 memcpy(skb_put(skb, len), data, len);
/* Header complete for an event: remaining bytes come from plen. */
1913 if (skb->len == HCI_EVENT_HDR_SIZE) {
1914 struct hci_event_hdr *h = hci_event_hdr(skb);
1915 scb->expect = h->plen;
/* Advertised payload would overflow the buffer: drop the state. */
1917 if (skb_tailroom(skb) < scb->expect) {
1919 hdev->reassembly[index] = NULL;
1925 case HCI_ACLDATA_PKT:
1926 if (skb->len == HCI_ACL_HDR_SIZE) {
1927 struct hci_acl_hdr *h = hci_acl_hdr(skb);
/* ACL dlen is little-endian on the wire. */
1928 scb->expect = __le16_to_cpu(h->dlen);
1930 if (skb_tailroom(skb) < scb->expect) {
1932 hdev->reassembly[index] = NULL;
1938 case HCI_SCODATA_PKT:
1939 if (skb->len == HCI_SCO_HDR_SIZE) {
1940 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1941 scb->expect = h->dlen;
1943 if (skb_tailroom(skb) < scb->expect) {
1945 hdev->reassembly[index] = NULL;
/* Nothing left to expect: deliver the completed frame upward. */
1952 if (scb->expect == 0) {
1953 /* Complete frame */
1955 bt_cb(skb)->pkt_type = type;
1956 hci_recv_frame(skb);
1958 hdev->reassembly[index] = NULL;
/* Feed a typed fragment into the reassembler.  Uses reassembly slot
 * (type - 1), so each packet type reassembles independently.  The
 * surrounding loop/return lines are elided; 'rem' is the byte count
 * hci_reassembly() did not consume. */
1966 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1970 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1974 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Advance past the consumed portion of the input buffer. */
1978 data += (count - rem);
1984 EXPORT_SYMBOL(hci_recv_fragment);
/* Dedicated reassembly slot for untyped byte streams (e.g. UART
 * transports where the packet-type indicator is in-band). */
1986 #define STREAM_REASSEMBLY 0
/* Feed raw stream bytes into the reassembler.  The first byte of each
 * frame carries the HCI packet type; subsequent bytes continue the
 * in-progress packet stored in slot STREAM_REASSEMBLY.  Loop and
 * error-path lines are elided in this view. */
1988 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1994 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1997 struct { char type; } *pkt;
1999 /* Start of the frame */
/* Continuing a frame: reuse the type recorded in the skb's cb. */
2006 type = bt_cb(skb)->pkt_type;
2008 rem = hci_reassembly(hdev, type, data, count,
2013 data += (count - rem);
2019 EXPORT_SYMBOL(hci_recv_stream_fragment);
2021 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure on the global
 * hci_cb_list, guarded by hci_cb_list_lock. */
2023 int hci_register_cb(struct hci_cb *cb)
2025 BT_DBG("%p name %s", cb, cb->name);
2027 write_lock(&hci_cb_list_lock);
2028 list_add(&cb->list, &hci_cb_list);
2029 write_unlock(&hci_cb_list_lock);
2033 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback from the
 * global hci_cb_list, guarded by hci_cb_list_lock. */
2035 int hci_unregister_cb(struct hci_cb *cb)
2037 BT_DBG("%p name %s", cb, cb->name);
2039 write_lock(&hci_cb_list_lock);
2040 list_del(&cb->list);
2041 write_unlock(&hci_cb_list_lock);
2045 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver.  Timestamps the skb, mirrors
 * a copy to the HCI monitor (and to raw sockets when promiscuous
 * listeners exist), then calls the driver's send callback.  The
 * hci_dev comes from skb->dev, set by the queuing paths. */
2047 static int hci_send_frame(struct sk_buff *skb)
2049 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2056 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2059 __net_timestamp(skb);
2061 /* Send copy to monitor */
2062 hci_send_to_monitor(hdev, skb);
2064 if (atomic_read(&hdev->promisc)) {
2065 /* Send copy to the sockets */
2066 hci_send_to_sock(hdev, skb);
2069 /* Get rid of skb owner, prior to sending to the driver. */
2072 return hdev->send(skb);
2075 /* Send HCI command */
/* Build an HCI command packet (header + plen parameter bytes) and
 * queue it on cmd_q for hci_cmd_work to send.  During HCI_INIT the
 * opcode is also recorded as init_last_cmd so the init state machine
 * can match completions.  Returns -ENOMEM-style failure on allocation
 * error (return lines elided in this view). */
2076 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2078 int len = HCI_COMMAND_HDR_SIZE + plen;
2079 struct hci_command_hdr *hdr;
2080 struct sk_buff *skb;
2082 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2084 skb = bt_skb_alloc(len, GFP_ATOMIC);
2086 BT_ERR("%s no memory for command", hdev->name);
2090 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode is little-endian on the wire. */
2091 hdr->opcode = cpu_to_le16(opcode);
2095 memcpy(skb_put(skb, plen), param, plen);
2097 BT_DBG("skb len %d", skb->len);
2099 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2100 skb->dev = (void *) hdev;
2102 if (test_bit(HCI_INIT, &hdev->flags))
2103 hdev->init_last_cmd = opcode;
/* Actual transmission happens asynchronously in hci_cmd_work. */
2105 skb_queue_tail(&hdev->cmd_q, skb);
2106 queue_work(hdev->workqueue, &hdev->cmd_work);
2111 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command,
 * but only if its opcode matches the one requested; NULL-return paths
 * for a missing or mismatched sent_cmd are elided in this view. */
2112 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2114 struct hci_command_hdr *hdr;
2116 if (!hdev->sent_cmd)
2119 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire byte order rather than converting hdr->opcode. */
2121 if (hdr->opcode != cpu_to_le16(opcode))
2124 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
/* Parameters start right after the command header. */
2126 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to skb: packs the connection handle and
 * packet-boundary/broadcast flags into the handle field and records
 * the payload length, both little-endian. */
2130 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2132 struct hci_acl_hdr *hdr;
2135 skb_push(skb, HCI_ACL_HDR_SIZE);
2136 skb_reset_transport_header(skb);
2137 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2138 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2139 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL skb (possibly carrying continuation fragments in its
 * frag_list) onto the given queue.  The first fragment keeps the
 * caller's flags; continuations are queued with ACL_START cleared,
 * and the whole set is enqueued atomically under the queue lock. */
2142 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2143 struct sk_buff *skb, __u16 flags)
2145 struct hci_dev *hdev = conn->hdev;
2146 struct sk_buff *list;
/* Trim skb->len to the head fragment only; frag_list holds the rest. */
2148 skb->len = skb_headlen(skb);
2151 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2152 hci_add_acl_hdr(skb, conn->handle, flags);
2154 list = skb_shinfo(skb)->frag_list;
2156 /* Non fragmented */
2157 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2159 skb_queue_tail(queue, skb);
2162 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain so each piece is queued individually. */
2164 skb_shinfo(skb)->frag_list = NULL;
2166 /* Queue all fragments atomically */
2167 spin_lock(&queue->lock);
2169 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
2171 flags &= ~ACL_START;
2174 skb = list; list = list->next;
2176 skb->dev = (void *) hdev;
2177 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2178 hci_add_acl_hdr(skb, conn->handle, flags);
2180 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2182 __skb_queue_tail(queue, skb);
2185 spin_unlock(&queue->lock);
/* Public entry for sending ACL data on a channel: tags the skb with
 * the device, queues it (and any fragments) on the channel's data_q,
 * and kicks the TX work to schedule transmission. */
2189 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2191 struct hci_conn *conn = chan->conn;
2192 struct hci_dev *hdev = conn->hdev;
2194 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2196 skb->dev = (void *) hdev;
2198 hci_queue_acl(conn, &chan->data_q, skb, flags);
2200 queue_work(hdev->workqueue, &hdev->tx_work);
/* Send SCO data: prepend the SCO header (handle little-endian, dlen
 * as-is — it is a single byte field), queue on the connection's
 * data_q and kick the TX work. */
2204 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2206 struct hci_dev *hdev = conn->hdev;
2207 struct hci_sco_hdr hdr;
2209 BT_DBG("%s len %d", hdev->name, skb->len);
2211 hdr.handle = cpu_to_le16(conn->handle);
2212 hdr.dlen = skb->len;
2214 skb_push(skb, HCI_SCO_HDR_SIZE);
2215 skb_reset_transport_header(skb);
2216 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2218 skb->dev = (void *) hdev;
2219 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2221 skb_queue_tail(&conn->data_q, skb);
2222 queue_work(hdev->workqueue, &hdev->tx_work);
2225 /* ---- HCI TX task (outgoing data) ---- */
2227 /* HCI Connection scheduler */
2228 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2231 struct hci_conn_hash *h = &hdev->conn_hash;
2232 struct hci_conn *conn = NULL, *c;
2233 unsigned int num = 0, min = ~0;
2235 /* We don't have to lock device here. Connections are always
2236 * added and removed with TX task disabled. */
2240 list_for_each_entry_rcu(c, &h->list, list) {
2241 if (c->type != type || skb_queue_empty(&c->data_q))
2244 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2249 if (c->sent < min) {
2254 if (hci_conn_num(hdev, type) == num)
2263 switch (conn->type) {
2265 cnt = hdev->acl_cnt;
2269 cnt = hdev->sco_cnt;
2272 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2276 BT_ERR("Unknown link type");
2284 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: the controller stopped returning completed-
 * packet credits for this link type.  Disconnect every connection of
 * that type that still has unacknowledged packets (reason 0x13,
 * remote user terminated). */
2288 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2290 struct hci_conn_hash *h = &hdev->conn_hash;
2293 BT_ERR("%s link tx timeout", hdev->name);
2297 /* Kill stalled connections */
2298 list_for_each_entry_rcu(c, &h->list, list) {
2299 if (c->type == type && c->sent) {
2300 BT_ERR("%s killing stalled connection %s",
2301 hdev->name, batostr(&c->dst));
2302 hci_acl_disconn(c, 0x13);
/* Channel-level scheduler: among all channels of connections of the
 * given link type with queued data, pick one whose head skb has the
 * highest priority; ties are broken toward the connection with the
 * fewest in-flight packets.  Returns the channel and writes its send
 * quota (credit-pool selection mirrors hci_low_sent).  Iterates the
 * connection hash and per-connection channel lists under RCU; some
 * quota/selection lines are elided in this view. */
2309 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2312 struct hci_conn_hash *h = &hdev->conn_hash;
2313 struct hci_chan *chan = NULL;
2314 unsigned int num = 0, min = ~0, cur_prio = 0;
2315 struct hci_conn *conn;
2316 int cnt, q, conn_num = 0;
2318 BT_DBG("%s", hdev->name);
2322 list_for_each_entry_rcu(conn, &h->list, list) {
2323 struct hci_chan *tmp;
2325 if (conn->type != type)
2328 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2333 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2334 struct sk_buff *skb;
2336 if (skb_queue_empty(&tmp->data_q))
/* Only the head skb's priority matters for channel selection. */
2339 skb = skb_peek(&tmp->data_q);
2340 if (skb->priority < cur_prio)
/* Higher priority found: restart fairness tracking at this level. */
2343 if (skb->priority > cur_prio) {
2346 cur_prio = skb->priority;
2351 if (conn->sent < min) {
2357 if (hci_conn_num(hdev, type) == conn_num)
2366 switch (chan->conn->type) {
2368 cnt = hdev->acl_cnt;
2372 cnt = hdev->sco_cnt;
/* LE shares the ACL pool when the controller has no LE buffers. */
2375 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2379 BT_ERR("Unknown link type");
2384 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass run after a scheduling round: promote the head
 * skb of waiting channels toward HCI_PRIO_MAX - 1 so lower-priority
 * traffic eventually gets scheduled.  Walks connections/channels of
 * the given link type under RCU. */
2388 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2390 struct hci_conn_hash *h = &hdev->conn_hash;
2391 struct hci_conn *conn;
2394 BT_DBG("%s", hdev->name);
2398 list_for_each_entry_rcu(conn, &h->list, list) {
2399 struct hci_chan *chan;
2401 if (conn->type != type)
2404 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2409 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2410 struct sk_buff *skb;
2417 if (skb_queue_empty(&chan->data_q))
2420 skb = skb_peek(&chan->data_q);
/* Never promote past the maximum schedulable priority. */
2421 if (skb->priority >= HCI_PRIO_MAX - 1)
2424 skb->priority = HCI_PRIO_MAX - 1;
2426 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2430 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks this ACL packet consumes under
 * block-based flow control: payload bytes (skb length minus the ACL
 * header) divided by the controller's block length, rounded up. */
2438 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2440 /* Calculate count of blocks used by this packet */
2441 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* Detect a stalled ACL link: if no credits remain and the last ACL TX
 * was longer ago than HCI_ACL_TX_TIMEOUT, kill stalled connections.
 * Skipped entirely for raw-mode devices. */
2444 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2446 if (!test_bit(HCI_RAW, &hdev->flags)) {
2447 /* ACL tx timeout must be longer than maximum
2448 * link supervision timeout (40.9 seconds) */
2449 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2450 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2451 hci_link_tx_to(hdev, ACL_LINK);
/* ACL scheduler for packet-based flow control: while ACL credits
 * remain, pick the best channel via hci_chan_sent() and drain up to
 * its quota of same-or-higher-priority skbs.  If anything was sent,
 * run the priority-promotion pass afterwards.  Credit/accounting
 * lines inside the loop are partly elided in this view. */
2455 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2457 unsigned int cnt = hdev->acl_cnt;
2458 struct hci_chan *chan;
2459 struct sk_buff *skb;
2462 __check_timeout(hdev, cnt);
2464 while (hdev->acl_cnt &&
2465 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2466 u32 priority = (skb_peek(&chan->data_q))->priority;
2467 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2468 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2469 skb->len, skb->priority);
2471 /* Stop if priority has changed */
2472 if (skb->priority < priority)
2475 skb = skb_dequeue(&chan->data_q);
2477 hci_conn_enter_active_mode(chan->conn,
2478 bt_cb(skb)->force_active);
2480 hci_send_frame(skb);
2481 hdev->acl_last_tx = jiffies;
/* Credits were consumed: rebalance queued priorities. */
2489 if (cnt != hdev->acl_cnt)
2490 hci_prio_recalculate(hdev, ACL_LINK);
/* ACL scheduler for block-based flow control: like the packet-based
 * variant, but accounting is in controller data blocks.  A packet
 * needing more blocks than remain is left queued; sent counters on
 * the channel and connection advance by blocks, not packets. */
2493 static void hci_sched_acl_blk(struct hci_dev *hdev)
2495 unsigned int cnt = hdev->block_cnt;
2496 struct hci_chan *chan;
2497 struct sk_buff *skb;
2500 __check_timeout(hdev, cnt);
2502 while (hdev->block_cnt > 0 &&
2503 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2504 u32 priority = (skb_peek(&chan->data_q))->priority;
2505 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2508 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2509 skb->len, skb->priority);
2511 /* Stop if priority has changed */
2512 if (skb->priority < priority)
2515 skb = skb_dequeue(&chan->data_q);
/* Don't send a packet we can't afford in blocks. */
2517 blocks = __get_blocks(hdev, skb);
2518 if (blocks > hdev->block_cnt)
2521 hci_conn_enter_active_mode(chan->conn,
2522 bt_cb(skb)->force_active);
2524 hci_send_frame(skb);
2525 hdev->acl_last_tx = jiffies;
2527 hdev->block_cnt -= blocks;
2530 chan->sent += blocks;
2531 chan->conn->sent += blocks;
2535 if (cnt != hdev->block_cnt)
2536 hci_prio_recalculate(hdev, ACL_LINK);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow-control mode.  No-op when there
 * are no ACL connections. */
2539 static void hci_sched_acl(struct hci_dev *hdev)
2541 BT_DBG("%s", hdev->name);
2543 if (!hci_conn_num(hdev, ACL_LINK))
2546 switch (hdev->flow_ctl_mode) {
2547 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2548 hci_sched_acl_pkt(hdev);
2551 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2552 hci_sched_acl_blk(hdev);
2558 static void hci_sched_sco(struct hci_dev *hdev)
2560 struct hci_conn *conn;
2561 struct sk_buff *skb;
2564 BT_DBG("%s", hdev->name);
2566 if (!hci_conn_num(hdev, SCO_LINK))
2569 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
2570 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2571 BT_DBG("skb %p len %d", skb, skb->len);
2572 hci_send_frame(skb);
2575 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco() but walks
 * ESCO_LINK connections; both link types draw from hdev->sco_cnt. */
2581 static void hci_sched_esco(struct hci_dev *hdev)
2583 struct hci_conn *conn;
2584 struct sk_buff *skb;
2587 BT_DBG("%s", hdev->name);
2589 if (!hci_conn_num(hdev, ESCO_LINK))
2592 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2594 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2595 BT_DBG("skb %p len %d", skb, skb->len);
2596 hci_send_frame(skb);
/* Guard against conn->sent wrapping to the ~0 sentinel. */
2599 if (conn->sent == ~0)
/* LE scheduler: same channel-based scheme as the ACL packet scheduler
 * but with its own stall timeout (45 s) and credit pool — LE shares
 * the ACL pool when the controller reports no dedicated LE buffers
 * (le_pkts == 0).  Remaining credits are written back to le_cnt or
 * acl_cnt accordingly (le_cnt write-back line elided in this view). */
2605 static void hci_sched_le(struct hci_dev *hdev)
2607 struct hci_chan *chan;
2608 struct sk_buff *skb;
2609 int quote, cnt, tmp;
2611 BT_DBG("%s", hdev->name);
2613 if (!hci_conn_num(hdev, LE_LINK))
2616 if (!test_bit(HCI_RAW, &hdev->flags)) {
2617 /* LE tx timeout must be longer than maximum
2618 * link supervision timeout (40.9 seconds) */
2619 if (!hdev->le_cnt && hdev->le_pkts &&
2620 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2621 hci_link_tx_to(hdev, LE_LINK)
2624 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2626 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2627 u32 priority = (skb_peek(&chan->data_q))->priority;
2628 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2629 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2630 skb->len, skb->priority);
2632 /* Stop if priority has changed */
2633 if (skb->priority < priority)
2636 skb = skb_dequeue(&chan->data_q);
2638 hci_send_frame(skb);
2639 hdev->le_last_tx = jiffies;
/* No dedicated LE buffers: leftover credits go back to the ACL pool. */
2650 hdev->acl_cnt = cnt;
2653 hci_prio_recalculate(hdev, LE_LINK);
/* TX work handler: run all per-link-type schedulers (ACL, SCO, eSCO,
 * and LE — the LE call line is elided in this view), then flush any
 * raw-queued packets straight to the driver. */
2656 static void hci_tx_work(struct work_struct *work)
2658 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2659 struct sk_buff *skb;
2661 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2662 hdev->sco_cnt, hdev->le_cnt);
2664 /* Schedule queues and send stuff to HCI driver */
2666 hci_sched_acl(hdev);
2668 hci_sched_sco(hdev);
2670 hci_sched_esco(hdev);
2674 /* Send next queued raw (unknown type) packet */
2675 while ((skb = skb_dequeue(&hdev->raw_q)))
2676 hci_send_frame(skb);
2679 /* ----- HCI RX task (incoming data processing) ----- */
2681 /* ACL data packet */
/* Process one incoming ACL data packet: strip the ACL header, split
 * the wire handle field into connection handle + flags, look up the
 * connection, notify mgmt of first data on an MGMT-managed link, and
 * hand the payload to L2CAP.  Unknown handles are logged and the skb
 * is dropped (kfree line elided in this view). */
2682 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2684 struct hci_acl_hdr *hdr = (void *) skb->data;
2685 struct hci_conn *conn;
2686 __u16 handle, flags;
2688 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* Wire handle packs flags in the upper bits; unpack both. */
2690 handle = __le16_to_cpu(hdr->handle);
2691 flags = hci_flags(handle);
2692 handle = hci_handle(handle);
2694 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2697 hdev->stat.acl_rx++;
2700 conn = hci_conn_hash_lookup_handle(hdev, handle);
2701 hci_dev_unlock(hdev);
2704 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
/* First data on this link: tell mgmt the device is connected. */
2707 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2708 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2709 mgmt_device_connected(hdev, &conn->dst, conn->type,
2710 conn->dst_type, 0, NULL, 0,
2712 hci_dev_unlock(hdev);
2714 /* Send to upper protocol */
2715 l2cap_recv_acldata(conn, skb, flags);
2718 BT_ERR("%s ACL packet for unknown connection handle %d",
2719 hdev->name, handle);
2725 /* SCO data packet */
/* Process one incoming SCO data packet: strip the SCO header, look up
 * the connection by handle and pass the payload to the SCO layer.
 * Unknown handles are logged and the skb dropped (kfree line elided
 * in this view). */
2726 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2728 struct hci_sco_hdr *hdr = (void *) skb->data;
2729 struct hci_conn *conn;
2732 skb_pull(skb, HCI_SCO_HDR_SIZE);
2734 handle = __le16_to_cpu(hdr->handle);
2736 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2738 hdev->stat.sco_rx++;
2741 conn = hci_conn_hash_lookup_handle(hdev, handle);
2742 hci_dev_unlock(hdev);
2745 /* Send to upper protocol */
2746 sco_recv_scodata(conn, skb);
2749 BT_ERR("%s SCO packet for unknown connection handle %d",
2750 hdev->name, handle);
/* RX work handler: drain rx_q, mirroring each frame to the monitor
 * (and promiscuous sockets), then dispatch by packet type to the
 * event/ACL/SCO handlers.  Raw-mode devices and data packets arriving
 * during INIT are not processed further (their drop/continue lines
 * are elided in this view). */
2756 static void hci_rx_work(struct work_struct *work)
2758 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2759 struct sk_buff *skb;
2761 BT_DBG("%s", hdev->name);
2763 while ((skb = skb_dequeue(&hdev->rx_q))) {
2764 /* Send copy to monitor */
2765 hci_send_to_monitor(hdev, skb);
2767 if (atomic_read(&hdev->promisc)) {
2768 /* Send copy to the sockets */
2769 hci_send_to_sock(hdev, skb);
2772 if (test_bit(HCI_RAW, &hdev->flags)) {
2777 if (test_bit(HCI_INIT, &hdev->flags)) {
2778 /* Don't process data packets in this states. */
2779 switch (bt_cb(skb)->pkt_type) {
2780 case HCI_ACLDATA_PKT:
2781 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type. */
2788 switch (bt_cb(skb)->pkt_type) {
2790 BT_DBG("%s Event packet", hdev->name);
2791 hci_event_packet(hdev, skb);
2794 case HCI_ACLDATA_PKT:
2795 BT_DBG("%s ACL data packet", hdev->name);
2796 hci_acldata_packet(hdev, skb);
2799 case HCI_SCODATA_PKT:
2800 BT_DBG("%s SCO data packet", hdev->name);
2801 hci_scodata_packet(hdev, skb);
/* CMD work handler: if the controller has command credits, dequeue
 * the next command, keep a clone in sent_cmd (for hci_sent_cmd_data
 * and retransmission context), send it and arm the command timeout.
 * HCI_RESET stops the timer instead, since reset may stall the
 * controller legitimately.  On clone failure the command is requeued
 * and the work rescheduled. */
2811 static void hci_cmd_work(struct work_struct *work)
2813 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2814 struct sk_buff *skb;
2816 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2818 /* Send queued commands */
2819 if (atomic_read(&hdev->cmd_cnt)) {
2820 skb = skb_dequeue(&hdev->cmd_q);
/* Replace the previously saved command, if any. */
2824 kfree_skb(hdev->sent_cmd);
2826 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2827 if (hdev->sent_cmd) {
2828 atomic_dec(&hdev->cmd_cnt);
2829 hci_send_frame(skb);
2830 if (test_bit(HCI_RESET, &hdev->flags))
2831 del_timer(&hdev->cmd_timer);
2833 mod_timer(&hdev->cmd_timer,
2834 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
/* Clone failed: put the command back and try again later. */
2836 skb_queue_head(&hdev->cmd_q, skb);
2837 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start a BR/EDR inquiry of the given duration using the General
 * Inquiry Access Code.  Refuses with -EINPROGRESS if an inquiry is
 * already running; flushes the inquiry cache first. */
2842 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2844 /* General inquiry access code (GIAC) */
2845 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2846 struct hci_cp_inquiry cp;
2848 BT_DBG("%s", hdev->name);
2850 if (test_bit(HCI_INQUIRY, &hdev->flags))
2851 return -EINPROGRESS;
/* Stale results must not leak into the new inquiry session. */
2853 inquiry_cache_flush(hdev);
2855 memset(&cp, 0, sizeof(cp));
2856 memcpy(&cp.lap, lap, sizeof(cp.lap));
2859 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry.  If none is active this is a no-op
 * (the early-return value line is elided in this view). */
2862 int hci_cancel_inquiry(struct hci_dev *hdev)
2864 BT_DBG("%s", hdev->name);
2866 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2869 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* Map an mgmt-layer BDADDR address type to the core's LE address-type
 * constant.  Anything other than BDADDR_LE_PUBLIC falls back to the
 * random address type. */
2872 u8 bdaddr_to_le(u8 bdaddr_type)
2874 switch (bdaddr_type) {
2875 case BDADDR_LE_PUBLIC:
2876 return ADDR_LE_DEV_PUBLIC;
2879 /* Fallback to LE Random address type */
2880 return ADDR_LE_DEV_RANDOM;