android-x86/kernel.git: net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

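/* Milliseconds before an automatically powered-on controller is
 * switched back off again if userspace has not taken it over
 * (see hci_power_on() below).
 */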
#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check whether the completed command
         * matches the last init command; if it does not, either resend the
         * last sent command (see the CSR quirk below) or just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);
                struct sk_buff *skb;

                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */

                if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
                        return;

                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
                if (skb) {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }

                return;
        }

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
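/*
 * The request callback typically queues one or more HCI commands;
 * __hci_request() then sleeps until hci_req_complete() is called from
 * the event path, hci_req_cancel() aborts the request, or the timeout
 * expires.  Callers hold hci_req_lock(), e.g.:
 *
 *      ret = __hci_request(hdev, hci_init_req, 0,
 *                          msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */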
static int __hci_request(struct hci_dev *hdev,
                         void (*req)(struct hci_dev *hdev, unsigned long opt),
                         unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_request(struct hci_dev *hdev,
                       void (*req)(struct hci_dev *hdev, unsigned long opt),
                       unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 * 0.625 ms = 20 seconds */
        param = __constant_cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Reset */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

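/*
 * Keep the resolve list ordered by ascending |RSSI| (strongest signal
 * first), so that names are resolved for the closest devices first.
 * Entries whose name request is already pending keep their position.
 */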
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

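/*
 * Add a fresh inquiry result to the cache or refresh an existing
 * entry.  Returns true when the remote name is already known (so no
 * remote name request is needed for this device) and false otherwise,
 * including on allocation failure.
 */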
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

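/*
 * HCIINQUIRY ioctl helper.  Note that ir.length is the HCI
 * Inquiry_Length parameter, which the specification measures in units
 * of 1.28 seconds; the 2000 ms multiplier below gives the request
 * timeout some headroom beyond the controller's own inquiry duration.
 */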
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

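/*
 * Bring up an HCI device: open the driver, run the HCI init sequence
 * (unless the device is marked raw) and set HCI_UP.  If init fails,
 * all pending work and queues are flushed and the driver is closed
 * again.
 */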
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                    msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                            msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

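/*
 * Common power-down path shared by hci_dev_close(), rfkill and the
 * delayed power_off work: cancel any pending request, flush work and
 * queues, optionally reset the controller (HCI_QUIRK_RESET_ON_CLOSE)
 * and finally call the driver's close() callback.
 */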
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                              msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                          msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                  msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

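        /* For the MTU ioctls, dev_opt packs the packet count in its
         * first __u16 and the MTU in the second one.
         */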
        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

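/*
 * HCIGETDEVLIST ioctl helper: userspace passes the capacity of its
 * array in dev_num and gets back the number of entries actually
 * filled in, each carrying a device id and the device's flags.
 */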
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off,
                                      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

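/*
 * Decide whether a link key may be stored persistently.  The
 * auth_type/remote_auth values are the authentication requirements
 * from the Bluetooth core specification: 0x00/0x01 no bonding,
 * 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding, where odd
 * values additionally require MITM protection.
 */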
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = 1;

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

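/*
 * Start an LE scan: program the scan parameters, enable scanning and
 * arm the delayed work that disables the scan again after @timeout
 * milliseconds.  Runs from le_scan_work below, so it may sleep.
 */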
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
                          u16 window, int timeout)
{
        long timeo = msecs_to_jiffies(3000);
        struct le_scan_params param;
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EINPROGRESS;

        param.type = type;
        param.interval = interval;
        param.window = window;

        hci_req_lock(hdev);

        err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
                            timeo);
        if (!err)
                err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

        hci_req_unlock(hdev);

        if (err < 0)
                return err;

        schedule_delayed_work(&hdev->le_scan_disable,
                              msecs_to_jiffies(timeout));

        return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EALREADY;

        if (cancel_delayed_work(&hdev->le_scan_disable)) {
                struct hci_cp_le_set_scan_enable cp;

                /* Send HCI command to disable LE Scan */
                memset(&cp, 0, sizeof(cp));
                hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;

        BT_DBG("%s", hdev->name);

        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        hci_do_le_scan(hdev, param->type, param->interval, param->window,
                       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
                int timeout)
{
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;

        param->type = type;
        param->interval = interval;
        param->window = window;
        param->timeout = timeout;

        queue_work(system_long_wq, &hdev->le_scan);

        return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->sniff_max_interval = 800; /* 0.625 ms units = 500 ms */
        hdev->sniff_min_interval = 80;  /* 0.625 ms units = 50 ms */

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->le_scan, le_scan_work);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->driver_init);
        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        hci_init_sysfs(hdev);
        discovery_init(hdev);
        hci_conn_hash_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
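/*
 * Device ids are handed out smallest-free-first from the sorted device
 * list: with hci0, hci1 and hci3 registered, a new BR/EDR controller
 * becomes hci2.  AMP controllers start the search at id 1 so that the
 * index can double as the AMP controller ID.
 */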
1690 int hci_register_dev(struct hci_dev *hdev)
1691 {
1692         struct list_head *head, *p;
1693         int id, error;
1694
1695         if (!hdev->open || !hdev->close)
1696                 return -EINVAL;
1697
1698         write_lock(&hci_dev_list_lock);
1699
1700         /* Do not allow HCI_AMP devices to register at index 0,
1701          * so the index can be used as the AMP controller ID.
1702          */
1703         id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1704         head = &hci_dev_list;
1705
1706         /* Find first available device id */
1707         list_for_each(p, &hci_dev_list) {
1708                 int nid = list_entry(p, struct hci_dev, list)->id;
1709                 if (nid > id)
1710                         break;
1711                 if (nid == id)
1712                         id++;
1713                 head = p;
1714         }
1715
1716         sprintf(hdev->name, "hci%d", id);
1717         hdev->id = id;
1718
1719         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1720
1721         list_add(&hdev->list, head);
1722
1723         write_unlock(&hci_dev_list_lock);
1724
1725         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1726                                           WQ_MEM_RECLAIM, 1);
1727         if (!hdev->workqueue) {
1728                 error = -ENOMEM;
1729                 goto err;
1730         }
1731
1732         error = hci_add_sysfs(hdev);
1733         if (error < 0)
1734                 goto err_wqueue;
1735
1736         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1737                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1738                                     hdev);
1739         if (hdev->rfkill) {
1740                 if (rfkill_register(hdev->rfkill) < 0) {
1741                         rfkill_destroy(hdev->rfkill);
1742                         hdev->rfkill = NULL;
1743                 }
1744         }
1745
1746         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1747         set_bit(HCI_SETUP, &hdev->dev_flags);
1748         schedule_work(&hdev->power_on);
1749
1750         hci_notify(hdev, HCI_DEV_REG);
1751         hci_dev_hold(hdev);
1752
1753         return id;
1754
1755 err_wqueue:
1756         destroy_workqueue(hdev->workqueue);
1757 err:
1758         write_lock(&hci_dev_list_lock);
1759         list_del(&hdev->list);
1760         write_unlock(&hci_dev_list_lock);
1761
1762         return error;
1763 }
1764 EXPORT_SYMBOL(hci_register_dev);
1765
1766 /* Unregister HCI device */
1767 void hci_unregister_dev(struct hci_dev *hdev)
1768 {
1769         int i;
1770
1771         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1772
1773         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1774
1775         write_lock(&hci_dev_list_lock);
1776         list_del(&hdev->list);
1777         write_unlock(&hci_dev_list_lock);
1778
1779         hci_dev_do_close(hdev);
1780
1781         for (i = 0; i < NUM_REASSEMBLY; i++)
1782                 kfree_skb(hdev->reassembly[i]);
1783
1784         if (!test_bit(HCI_INIT, &hdev->flags) &&
1785             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1786                 hci_dev_lock(hdev);
1787                 mgmt_index_removed(hdev);
1788                 hci_dev_unlock(hdev);
1789         }
1790
1791         /* mgmt_index_removed should take care of emptying the
1792          * pending list */
1793         BUG_ON(!list_empty(&hdev->mgmt_pending));
1794
1795         hci_notify(hdev, HCI_DEV_UNREG);
1796
1797         if (hdev->rfkill) {
1798                 rfkill_unregister(hdev->rfkill);
1799                 rfkill_destroy(hdev->rfkill);
1800         }
1801
1802         hci_del_sysfs(hdev);
1803
1804         destroy_workqueue(hdev->workqueue);
1805
1806         hci_dev_lock(hdev);
1807         hci_blacklist_clear(hdev);
1808         hci_uuids_clear(hdev);
1809         hci_link_keys_clear(hdev);
1810         hci_smp_ltks_clear(hdev);
1811         hci_remote_oob_data_clear(hdev);
1812         hci_dev_unlock(hdev);
1813
1814         hci_dev_put(hdev);
1815 }
1816 EXPORT_SYMBOL(hci_unregister_dev);
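/*
 * Example (editorial sketch): the matching driver-side teardown, e.g. from
 * a transport disconnect handler:
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */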
1817
1818 /* Suspend HCI device */
1819 int hci_suspend_dev(struct hci_dev *hdev)
1820 {
1821         hci_notify(hdev, HCI_DEV_SUSPEND);
1822         return 0;
1823 }
1824 EXPORT_SYMBOL(hci_suspend_dev);
1825
1826 /* Resume HCI device */
1827 int hci_resume_dev(struct hci_dev *hdev)
1828 {
1829         hci_notify(hdev, HCI_DEV_RESUME);
1830         return 0;
1831 }
1832 EXPORT_SYMBOL(hci_resume_dev);
1833
1834 /* Receive frame from HCI drivers */
1835 int hci_recv_frame(struct sk_buff *skb)
1836 {
1837         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1838         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
1839                       !test_bit(HCI_INIT, &hdev->flags))) {
1840                 kfree_skb(skb);
1841                 return -ENXIO;
1842         }
1843
1844         /* Incoming skb */
1845         bt_cb(skb)->incoming = 1;
1846
1847         /* Time stamp */
1848         __net_timestamp(skb);
1849
1850         skb_queue_tail(&hdev->rx_q, skb);
1851         queue_work(hdev->workqueue, &hdev->rx_work);
1852
1853         return 0;
1854 }
1855 EXPORT_SYMBOL(hci_recv_frame);
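/*
 * Example (editorial sketch): a transport driver delivering one complete
 * frame.  The driver must set the owning hdev and the packet type before
 * calling hci_recv_frame(); here skb is a fully assembled event packet.
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(skb);
 */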
1856
1857 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1858                           int count, __u8 index)
1859 {
1860         int len = 0;
1861         int hlen = 0;
1862         int remain = count;
1863         struct sk_buff *skb;
1864         struct bt_skb_cb *scb;
1865
1866         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1867             index >= NUM_REASSEMBLY)
1868                 return -EILSEQ;
1869
1870         skb = hdev->reassembly[index];
1871
1872         if (!skb) {
1873                 switch (type) {
1874                 case HCI_ACLDATA_PKT:
1875                         len = HCI_MAX_FRAME_SIZE;
1876                         hlen = HCI_ACL_HDR_SIZE;
1877                         break;
1878                 case HCI_EVENT_PKT:
1879                         len = HCI_MAX_EVENT_SIZE;
1880                         hlen = HCI_EVENT_HDR_SIZE;
1881                         break;
1882                 case HCI_SCODATA_PKT:
1883                         len = HCI_MAX_SCO_SIZE;
1884                         hlen = HCI_SCO_HDR_SIZE;
1885                         break;
1886                 }
1887
1888                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1889                 if (!skb)
1890                         return -ENOMEM;
1891
1892                 scb = (void *) skb->cb;
1893                 scb->expect = hlen;
1894                 scb->pkt_type = type;
1895
1896                 skb->dev = (void *) hdev;
1897                 hdev->reassembly[index] = skb;
1898         }
1899
1900         while (count) {
1901                 scb = (void *) skb->cb;
1902                 len = min_t(uint, scb->expect, count);
1903
1904                 memcpy(skb_put(skb, len), data, len);
1905
1906                 count -= len;
1907                 data += len;
1908                 scb->expect -= len;
1909                 remain = count;
1910
1911                 switch (type) {
1912                 case HCI_EVENT_PKT:
1913                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1914                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1915                                 scb->expect = h->plen;
1916
1917                                 if (skb_tailroom(skb) < scb->expect) {
1918                                         kfree_skb(skb);
1919                                         hdev->reassembly[index] = NULL;
1920                                         return -ENOMEM;
1921                                 }
1922                         }
1923                         break;
1924
1925                 case HCI_ACLDATA_PKT:
1926                         if (skb->len == HCI_ACL_HDR_SIZE) {
1927                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1928                                 scb->expect = __le16_to_cpu(h->dlen);
1929
1930                                 if (skb_tailroom(skb) < scb->expect) {
1931                                         kfree_skb(skb);
1932                                         hdev->reassembly[index] = NULL;
1933                                         return -ENOMEM;
1934                                 }
1935                         }
1936                         break;
1937
1938                 case HCI_SCODATA_PKT:
1939                         if (skb->len == HCI_SCO_HDR_SIZE) {
1940                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1941                                 scb->expect = h->dlen;
1942
1943                                 if (skb_tailroom(skb) < scb->expect) {
1944                                         kfree_skb(skb);
1945                                         hdev->reassembly[index] = NULL;
1946                                         return -ENOMEM;
1947                                 }
1948                         }
1949                         break;
1950                 }
1951
1952                 if (scb->expect == 0) {
1953                         /* Complete frame */
1954
1955                         bt_cb(skb)->pkt_type = type;
1956                         hci_recv_frame(skb);
1957
1958                         hdev->reassembly[index] = NULL;
1959                         return remain;
1960                 }
1961         }
1962
1963         return remain;
1964 }
1965
1966 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1967 {
1968         int rem = 0;
1969
1970         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1971                 return -EILSEQ;
1972
1973         while (count) {
1974                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1975                 if (rem < 0)
1976                         return rem;
1977
1978                 data += (count - rem);
1979                 count = rem;
1980         }
1981
1982         return rem;
1983 }
1984 EXPORT_SYMBOL(hci_recv_fragment);
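/*
 * Example (editorial sketch): feeding a fragmented event to the
 * reassembler.  If the first chunk stops mid-packet, hci_recv_fragment()
 * buffers it in hdev->reassembly[type - 1] and returns 0; the next call
 * appends the rest, and the completed frame is handed to hci_recv_frame()
 * internally.
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, 3);        (header + 1 byte)
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf + 3, rest);
 */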
1985
1986 #define STREAM_REASSEMBLY 0
1987
1988 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1989 {
1990         int type;
1991         int rem = 0;
1992
1993         while (count) {
1994                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1995
1996                 if (!skb) {
1997                         struct { char type; } *pkt;
1998
1999                         /* Start of the frame */
2000                         pkt = data;
2001                         type = pkt->type;
2002
2003                         data++;
2004                         count--;
2005                 } else
2006                         type = bt_cb(skb)->pkt_type;
2007
2008                 rem = hci_reassembly(hdev, type, data, count,
2009                                      STREAM_REASSEMBLY);
2010                 if (rem < 0)
2011                         return rem;
2012
2013                 data += (count - rem);
2014                 count = rem;
2015         }
2016
2017         return rem;
2018 }
2019 EXPORT_SYMBOL(hci_recv_stream_fragment);
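/*
 * Example (editorial sketch): a UART (H:4) transport can push raw bytes,
 * including the leading packet-type octet of every frame, straight from
 * its receive path; foo_uart_rx() is hypothetical.
 *
 *	static void foo_uart_rx(struct hci_dev *hdev, const u8 *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, (void *) buf, len) < 0)
 *			BT_ERR("stream reassembly failed");
 *	}
 */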
2020
2021 /* ---- Interface to upper protocols ---- */
2022
2023 int hci_register_cb(struct hci_cb *cb)
2024 {
2025         BT_DBG("%p name %s", cb, cb->name);
2026
2027         write_lock(&hci_cb_list_lock);
2028         list_add(&cb->list, &hci_cb_list);
2029         write_unlock(&hci_cb_list_lock);
2030
2031         return 0;
2032 }
2033 EXPORT_SYMBOL(hci_register_cb);
2034
2035 int hci_unregister_cb(struct hci_cb *cb)
2036 {
2037         BT_DBG("%p name %s", cb, cb->name);
2038
2039         write_lock(&hci_cb_list_lock);
2040         list_del(&cb->list);
2041         write_unlock(&hci_cb_list_lock);
2042
2043         return 0;
2044 }
2045 EXPORT_SYMBOL(hci_unregister_cb);
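/*
 * Example (editorial sketch): an upper protocol hooks into the core by
 * registering a struct hci_cb at module init.  The callback member shown
 * is an assumption based on existing hci_cb users such as L2CAP; only the
 * register/unregister calls are defined here.
 *
 *	static struct hci_cb foo_cb = {
 *		.name         = "foo",
 *		.security_cfm = foo_security_cfm,	(hypothetical)
 *	};
 *
 *	hci_register_cb(&foo_cb);
 *	...
 *	hci_unregister_cb(&foo_cb);
 */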
2046
2047 static int hci_send_frame(struct sk_buff *skb)
2048 {
2049         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2050
2051         if (!hdev) {
2052                 kfree_skb(skb);
2053                 return -ENODEV;
2054         }
2055
2056         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2057
2058         /* Time stamp */
2059         __net_timestamp(skb);
2060
2061         /* Send copy to monitor */
2062         hci_send_to_monitor(hdev, skb);
2063
2064         if (atomic_read(&hdev->promisc)) {
2065                 /* Send copy to the sockets */
2066                 hci_send_to_sock(hdev, skb);
2067         }
2068
2069         /* Get rid of skb owner, prior to sending to the driver. */
2070         skb_orphan(skb);
2071
2072         return hdev->send(skb);
2073 }
2074
2075 /* Send HCI command */
2076 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2077 {
2078         int len = HCI_COMMAND_HDR_SIZE + plen;
2079         struct hci_command_hdr *hdr;
2080         struct sk_buff *skb;
2081
2082         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2083
2084         skb = bt_skb_alloc(len, GFP_ATOMIC);
2085         if (!skb) {
2086                 BT_ERR("%s no memory for command", hdev->name);
2087                 return -ENOMEM;
2088         }
2089
2090         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2091         hdr->opcode = cpu_to_le16(opcode);
2092         hdr->plen   = plen;
2093
2094         if (plen)
2095                 memcpy(skb_put(skb, plen), param, plen);
2096
2097         BT_DBG("skb len %d", skb->len);
2098
2099         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2100         skb->dev = (void *) hdev;
2101
2102         if (test_bit(HCI_INIT, &hdev->flags))
2103                 hdev->init_last_cmd = opcode;
2104
2105         skb_queue_tail(&hdev->cmd_q, skb);
2106         queue_work(hdev->workqueue, &hdev->cmd_work);
2107
2108         return 0;
2109 }
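/*
 * Example: the simplest caller in this file, hci_cancel_inquiry() below,
 * issues a parameterless command, while hci_do_inquiry() shows a command
 * with a parameter block:
 *
 *	hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
 */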
2110
2111 /* Get data from the previously sent command */
2112 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2113 {
2114         struct hci_command_hdr *hdr;
2115
2116         if (!hdev->sent_cmd)
2117                 return NULL;
2118
2119         hdr = (void *) hdev->sent_cmd->data;
2120
2121         if (hdr->opcode != cpu_to_le16(opcode))
2122                 return NULL;
2123
2124         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2125
2126         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2127 }
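/*
 * Example (editorial sketch): a command-complete handler recovering the
 * parameters it originally queued:
 *
 *	struct hci_cp_inquiry *cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
 *	if (!cp)
 *		return;		(reply does not match the last sent command)
 */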
2128
2129 /* Send ACL data */
2130 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2131 {
2132         struct hci_acl_hdr *hdr;
2133         int len = skb->len;
2134
2135         skb_push(skb, HCI_ACL_HDR_SIZE);
2136         skb_reset_transport_header(skb);
2137         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2138         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2139         hdr->dlen   = cpu_to_le16(len);
2140 }
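/*
 * Worked example: hci_handle_pack() keeps the 12-bit connection handle in
 * the low bits and the 4-bit packet boundary/broadcast flags above them,
 * so handle 0x002a with ACL_START (0x02) packs to 0x202a.  hci_handle()
 * and hci_flags() in hci_acldata_packet() below undo exactly this split.
 */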
2141
2142 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2143                           struct sk_buff *skb, __u16 flags)
2144 {
2145         struct hci_dev *hdev = conn->hdev;
2146         struct sk_buff *list;
2147
2148         skb->len = skb_headlen(skb);
2149         skb->data_len = 0;
2150
2151         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2152         hci_add_acl_hdr(skb, conn->handle, flags);
2153
2154         list = skb_shinfo(skb)->frag_list;
2155         if (!list) {
2156                 /* Non fragmented */
2157                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2158
2159                 skb_queue_tail(queue, skb);
2160         } else {
2161                 /* Fragmented */
2162                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2163
2164                 skb_shinfo(skb)->frag_list = NULL;
2165
2166                 /* Queue all fragments atomically */
2167                 spin_lock(&queue->lock);
2168
2169                 __skb_queue_tail(queue, skb);
2170
2171                 flags &= ~ACL_START;
2172                 flags |= ACL_CONT;
2173                 do {
2174                         skb = list; list = list->next;
2175
2176                         skb->dev = (void *) hdev;
2177                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2178                         hci_add_acl_hdr(skb, conn->handle, flags);
2179
2180                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2181
2182                         __skb_queue_tail(queue, skb);
2183                 } while (list);
2184
2185                 spin_unlock(&queue->lock);
2186         }
2187 }
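/*
 * Worked example: an skb with two entries on its frag_list goes out as
 * three ACL packets.  The head keeps the caller's ACL_START flags, both
 * fragments are rewritten to ACL_CONT, and all three are queued under one
 * queue lock so no other packet can interleave with the fragment train.
 */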
2188
2189 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2190 {
2191         struct hci_conn *conn = chan->conn;
2192         struct hci_dev *hdev = conn->hdev;
2193
2194         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2195
2196         skb->dev = (void *) hdev;
2197
2198         hci_queue_acl(conn, &chan->data_q, skb, flags);
2199
2200         queue_work(hdev->workqueue, &hdev->tx_work);
2201 }
2202
2203 /* Send SCO data */
2204 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2205 {
2206         struct hci_dev *hdev = conn->hdev;
2207         struct hci_sco_hdr hdr;
2208
2209         BT_DBG("%s len %d", hdev->name, skb->len);
2210
2211         hdr.handle = cpu_to_le16(conn->handle);
2212         hdr.dlen   = skb->len;
2213
2214         skb_push(skb, HCI_SCO_HDR_SIZE);
2215         skb_reset_transport_header(skb);
2216         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2217
2218         skb->dev = (void *) hdev;
2219         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2220
2221         skb_queue_tail(&conn->data_q, skb);
2222         queue_work(hdev->workqueue, &hdev->tx_work);
2223 }
2224
2225 /* ---- HCI TX task (outgoing data) ---- */
2226
2227 /* HCI Connection scheduler */
2228 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2229                                      int *quote)
2230 {
2231         struct hci_conn_hash *h = &hdev->conn_hash;
2232         struct hci_conn *conn = NULL, *c;
2233         unsigned int num = 0, min = ~0;
2234
2235         /* We don't have to lock the device here. Connections are
2236          * always added and removed with the TX task disabled. */
2237
2238         rcu_read_lock();
2239
2240         list_for_each_entry_rcu(c, &h->list, list) {
2241                 if (c->type != type || skb_queue_empty(&c->data_q))
2242                         continue;
2243
2244                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2245                         continue;
2246
2247                 num++;
2248
2249                 if (c->sent < min) {
2250                         min  = c->sent;
2251                         conn = c;
2252                 }
2253
2254                 if (hci_conn_num(hdev, type) == num)
2255                         break;
2256         }
2257
2258         rcu_read_unlock();
2259
2260         if (conn) {
2261                 int cnt, q;
2262
2263                 switch (conn->type) {
2264                 case ACL_LINK:
2265                         cnt = hdev->acl_cnt;
2266                         break;
2267                 case SCO_LINK:
2268                 case ESCO_LINK:
2269                         cnt = hdev->sco_cnt;
2270                         break;
2271                 case LE_LINK:
2272                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2273                         break;
2274                 default:
2275                         cnt = 0;
2276                         BT_ERR("Unknown link type");
2277                 }
2278
2279                 q = cnt / num;
2280                 *quote = q ? q : 1;
2281         } else
2282                 *quote = 0;
2283
2284         BT_DBG("conn %p quote %d", conn, *quote);
2285         return conn;
2286 }
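/*
 * Worked example of the quota: with sco_cnt == 5 free buffers and num == 2
 * eligible SCO connections, each scheduling round grants q = 5 / 2 = 2
 * packets; when cnt < num, the "q ? q : 1" fallback still grants a single
 * packet so the least-used connection is never starved outright.
 */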
2287
2288 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2289 {
2290         struct hci_conn_hash *h = &hdev->conn_hash;
2291         struct hci_conn *c;
2292
2293         BT_ERR("%s link tx timeout", hdev->name);
2294
2295         rcu_read_lock();
2296
2297         /* Kill stalled connections */
2298         list_for_each_entry_rcu(c, &h->list, list) {
2299                 if (c->type == type && c->sent) {
2300                         BT_ERR("%s killing stalled connection %s",
2301                                hdev->name, batostr(&c->dst));
2302                         hci_acl_disconn(c, 0x13); /* remote user terminated */
2303                 }
2304         }
2305
2306         rcu_read_unlock();
2307 }
2308
2309 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2310                                       int *quote)
2311 {
2312         struct hci_conn_hash *h = &hdev->conn_hash;
2313         struct hci_chan *chan = NULL;
2314         unsigned int num = 0, min = ~0, cur_prio = 0;
2315         struct hci_conn *conn;
2316         int cnt, q, conn_num = 0;
2317
2318         BT_DBG("%s", hdev->name);
2319
2320         rcu_read_lock();
2321
2322         list_for_each_entry_rcu(conn, &h->list, list) {
2323                 struct hci_chan *tmp;
2324
2325                 if (conn->type != type)
2326                         continue;
2327
2328                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2329                         continue;
2330
2331                 conn_num++;
2332
2333                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2334                         struct sk_buff *skb;
2335
2336                         if (skb_queue_empty(&tmp->data_q))
2337                                 continue;
2338
2339                         skb = skb_peek(&tmp->data_q);
2340                         if (skb->priority < cur_prio)
2341                                 continue;
2342
2343                         if (skb->priority > cur_prio) {
2344                                 num = 0;
2345                                 min = ~0;
2346                                 cur_prio = skb->priority;
2347                         }
2348
2349                         num++;
2350
2351                         if (conn->sent < min) {
2352                                 min  = conn->sent;
2353                                 chan = tmp;
2354                         }
2355                 }
2356
2357                 if (hci_conn_num(hdev, type) == conn_num)
2358                         break;
2359         }
2360
2361         rcu_read_unlock();
2362
2363         if (!chan)
2364                 return NULL;
2365
2366         switch (chan->conn->type) {
2367         case ACL_LINK:
2368                 cnt = hdev->acl_cnt;
2369                 break;
2370         case SCO_LINK:
2371         case ESCO_LINK:
2372                 cnt = hdev->sco_cnt;
2373                 break;
2374         case LE_LINK:
2375                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2376                 break;
2377         default:
2378                 cnt = 0;
2379                 BT_ERR("Unknown link type");
2380         }
2381
2382         q = cnt / num;
2383         *quote = q ? q : 1;
2384         BT_DBG("chan %p quote %d", chan, *quote);
2385         return chan;
2386 }
2387
2388 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2389 {
2390         struct hci_conn_hash *h = &hdev->conn_hash;
2391         struct hci_conn *conn;
2392         int num = 0;
2393
2394         BT_DBG("%s", hdev->name);
2395
2396         rcu_read_lock();
2397
2398         list_for_each_entry_rcu(conn, &h->list, list) {
2399                 struct hci_chan *chan;
2400
2401                 if (conn->type != type)
2402                         continue;
2403
2404                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2405                         continue;
2406
2407                 num++;
2408
2409                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2410                         struct sk_buff *skb;
2411
2412                         if (chan->sent) {
2413                                 chan->sent = 0;
2414                                 continue;
2415                         }
2416
2417                         if (skb_queue_empty(&chan->data_q))
2418                                 continue;
2419
2420                         skb = skb_peek(&chan->data_q);
2421                         if (skb->priority >= HCI_PRIO_MAX - 1)
2422                                 continue;
2423
2424                         skb->priority = HCI_PRIO_MAX - 1;
2425
2426                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2427                                skb->priority);
2428                 }
2429
2430                 if (hci_conn_num(hdev, type) == num)
2431                         break;
2432         }
2433
2434         rcu_read_unlock();
2436 }
2437
2438 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2439 {
2440         /* Calculate count of blocks used by this packet */
2441         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2442 }
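/*
 * Worked example: with block_len == 64, a 339-byte ACL packet carries
 * 339 - 4 = 335 bytes of payload (HCI_ACL_HDR_SIZE == 4), which occupies
 * DIV_ROUND_UP(335, 64) = 6 controller buffer blocks.
 */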
2443
2444 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2445 {
2446         if (!test_bit(HCI_RAW, &hdev->flags)) {
2447                 /* ACL tx timeout must be longer than maximum
2448                  * link supervision timeout (40.9 seconds) */
2449                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2450                                        msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2451                         hci_link_tx_to(hdev, ACL_LINK);
2452         }
2453 }
2454
2455 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2456 {
2457         unsigned int cnt = hdev->acl_cnt;
2458         struct hci_chan *chan;
2459         struct sk_buff *skb;
2460         int quote;
2461
2462         __check_timeout(hdev, cnt);
2463
2464         while (hdev->acl_cnt &&
2465                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2466                 u32 priority = (skb_peek(&chan->data_q))->priority;
2467                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2468                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2469                                skb->len, skb->priority);
2470
2471                         /* Stop if priority has changed */
2472                         if (skb->priority < priority)
2473                                 break;
2474
2475                         skb = skb_dequeue(&chan->data_q);
2476
2477                         hci_conn_enter_active_mode(chan->conn,
2478                                                    bt_cb(skb)->force_active);
2479
2480                         hci_send_frame(skb);
2481                         hdev->acl_last_tx = jiffies;
2482
2483                         hdev->acl_cnt--;
2484                         chan->sent++;
2485                         chan->conn->sent++;
2486                 }
2487         }
2488
2489         if (cnt != hdev->acl_cnt)
2490                 hci_prio_recalculate(hdev, ACL_LINK);
2491 }
2492
2493 static void hci_sched_acl_blk(struct hci_dev *hdev)
2494 {
2495         unsigned int cnt = hdev->block_cnt;
2496         struct hci_chan *chan;
2497         struct sk_buff *skb;
2498         int quote;
2499
2500         __check_timeout(hdev, cnt);
2501
2502         while (hdev->block_cnt > 0 &&
2503                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2504                 u32 priority = (skb_peek(&chan->data_q))->priority;
2505                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2506                         int blocks;
2507
2508                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2509                                skb->len, skb->priority);
2510
2511                         /* Stop if priority has changed */
2512                         if (skb->priority < priority)
2513                                 break;
2514
2515                         blocks = __get_blocks(hdev, skb);
2516                         if (blocks > hdev->block_cnt)
2517                                 return; /* leave oversized skb queued */
2518
2519                         skb = skb_dequeue(&chan->data_q);
2520
2521                         hci_conn_enter_active_mode(chan->conn,
2522                                                    bt_cb(skb)->force_active);
2523
2524                         hci_send_frame(skb);
2525                         hdev->acl_last_tx = jiffies;
2526
2527                         hdev->block_cnt -= blocks;
2528                         quote -= blocks;
2529
2530                         chan->sent += blocks;
2531                         chan->conn->sent += blocks;
2532                 }
2533         }
2534
2535         if (cnt != hdev->block_cnt)
2536                 hci_prio_recalculate(hdev, ACL_LINK);
2537 }
2538
2539 static void hci_sched_acl(struct hci_dev *hdev)
2540 {
2541         BT_DBG("%s", hdev->name);
2542
2543         if (!hci_conn_num(hdev, ACL_LINK))
2544                 return;
2545
2546         switch (hdev->flow_ctl_mode) {
2547         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2548                 hci_sched_acl_pkt(hdev);
2549                 break;
2550
2551         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2552                 hci_sched_acl_blk(hdev);
2553                 break;
2554         }
2555 }
2556
2557 /* Schedule SCO */
2558 static void hci_sched_sco(struct hci_dev *hdev)
2559 {
2560         struct hci_conn *conn;
2561         struct sk_buff *skb;
2562         int quote;
2563
2564         BT_DBG("%s", hdev->name);
2565
2566         if (!hci_conn_num(hdev, SCO_LINK))
2567                 return;
2568
2569         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2570                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2571                         BT_DBG("skb %p len %d", skb, skb->len);
2572                         hci_send_frame(skb);
2573
2574                         conn->sent++;
2575                         if (conn->sent == ~0)
2576                                 conn->sent = 0;
2577                 }
2578         }
2579 }
2580
2581 static void hci_sched_esco(struct hci_dev *hdev)
2582 {
2583         struct hci_conn *conn;
2584         struct sk_buff *skb;
2585         int quote;
2586
2587         BT_DBG("%s", hdev->name);
2588
2589         if (!hci_conn_num(hdev, ESCO_LINK))
2590                 return;
2591
2592         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2593                                                      &quote))) {
2594                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2595                         BT_DBG("skb %p len %d", skb, skb->len);
2596                         hci_send_frame(skb);
2597
2598                         conn->sent++;
2599                         if (conn->sent == ~0)
2600                                 conn->sent = 0;
2601                 }
2602         }
2603 }
2604
2605 static void hci_sched_le(struct hci_dev *hdev)
2606 {
2607         struct hci_chan *chan;
2608         struct sk_buff *skb;
2609         int quote, cnt, tmp;
2610
2611         BT_DBG("%s", hdev->name);
2612
2613         if (!hci_conn_num(hdev, LE_LINK))
2614                 return;
2615
2616         if (!test_bit(HCI_RAW, &hdev->flags)) {
2617                 /* LE tx timeout must be longer than maximum
2618                  * link supervision timeout (40.9 seconds) */
2619                 if (!hdev->le_cnt && hdev->le_pkts &&
2620                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
2621                         hci_link_tx_to(hdev, LE_LINK);
2622         }
2623
2624         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2625         tmp = cnt;
2626         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2627                 u32 priority = (skb_peek(&chan->data_q))->priority;
2628                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2629                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2630                                skb->len, skb->priority);
2631
2632                         /* Stop if priority has changed */
2633                         if (skb->priority < priority)
2634                                 break;
2635
2636                         skb = skb_dequeue(&chan->data_q);
2637
2638                         hci_send_frame(skb);
2639                         hdev->le_last_tx = jiffies;
2640
2641                         cnt--;
2642                         chan->sent++;
2643                         chan->conn->sent++;
2644                 }
2645         }
2646
2647         if (hdev->le_pkts)
2648                 hdev->le_cnt = cnt;
2649         else
2650                 hdev->acl_cnt = cnt;
2651
2652         if (cnt != tmp)
2653                 hci_prio_recalculate(hdev, LE_LINK);
2654 }
2655
2656 static void hci_tx_work(struct work_struct *work)
2657 {
2658         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2659         struct sk_buff *skb;
2660
2661         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2662                hdev->sco_cnt, hdev->le_cnt);
2663
2664         /* Schedule queues and send pending frames to the HCI driver */
2665
2666         hci_sched_acl(hdev);
2667
2668         hci_sched_sco(hdev);
2669
2670         hci_sched_esco(hdev);
2671
2672         hci_sched_le(hdev);
2673
2674         /* Send next queued raw (unknown type) packet */
2675         while ((skb = skb_dequeue(&hdev->raw_q)))
2676                 hci_send_frame(skb);
2677 }
2678
2679 /* ----- HCI RX task (incoming data processing) ----- */
2680
2681 /* ACL data packet */
2682 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2683 {
2684         struct hci_acl_hdr *hdr = (void *) skb->data;
2685         struct hci_conn *conn;
2686         __u16 handle, flags;
2687
2688         skb_pull(skb, HCI_ACL_HDR_SIZE);
2689
2690         handle = __le16_to_cpu(hdr->handle);
2691         flags  = hci_flags(handle);
2692         handle = hci_handle(handle);
2693
2694         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2695                handle, flags);
2696
2697         hdev->stat.acl_rx++;
2698
2699         hci_dev_lock(hdev);
2700         conn = hci_conn_hash_lookup_handle(hdev, handle);
2701         hci_dev_unlock(hdev);
2702
2703         if (conn) {
2704                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2705
2706                 hci_dev_lock(hdev);
2707                 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2708                     !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2709                         mgmt_device_connected(hdev, &conn->dst, conn->type,
2710                                               conn->dst_type, 0, NULL, 0,
2711                                               conn->dev_class);
2712                 hci_dev_unlock(hdev);
2713
2714                 /* Send to upper protocol */
2715                 l2cap_recv_acldata(conn, skb, flags);
2716                 return;
2717         }
2718
2719         BT_ERR("%s ACL packet for unknown connection handle %d",
2720                hdev->name, handle);
2721
2722         kfree_skb(skb);
2723 }
2724
2725 /* SCO data packet */
2726 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2727 {
2728         struct hci_sco_hdr *hdr = (void *) skb->data;
2729         struct hci_conn *conn;
2730         __u16 handle;
2731
2732         skb_pull(skb, HCI_SCO_HDR_SIZE);
2733
2734         handle = __le16_to_cpu(hdr->handle);
2735
2736         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2737
2738         hdev->stat.sco_rx++;
2739
2740         hci_dev_lock(hdev);
2741         conn = hci_conn_hash_lookup_handle(hdev, handle);
2742         hci_dev_unlock(hdev);
2743
2744         if (conn) {
2745                 /* Send to upper protocol */
2746                 sco_recv_scodata(conn, skb);
2747                 return;
2748         }
2749
2750         BT_ERR("%s SCO packet for unknown connection handle %d",
2751                hdev->name, handle);
2752
2753         kfree_skb(skb);
2754 }
2755
2756 static void hci_rx_work(struct work_struct *work)
2757 {
2758         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2759         struct sk_buff *skb;
2760
2761         BT_DBG("%s", hdev->name);
2762
2763         while ((skb = skb_dequeue(&hdev->rx_q))) {
2764                 /* Send copy to monitor */
2765                 hci_send_to_monitor(hdev, skb);
2766
2767                 if (atomic_read(&hdev->promisc)) {
2768                         /* Send copy to the sockets */
2769                         hci_send_to_sock(hdev, skb);
2770                 }
2771
2772                 if (test_bit(HCI_RAW, &hdev->flags)) {
2773                         kfree_skb(skb);
2774                         continue;
2775                 }
2776
2777                 if (test_bit(HCI_INIT, &hdev->flags)) {
2778                         /* Don't process data packets in this state. */
2779                         switch (bt_cb(skb)->pkt_type) {
2780                         case HCI_ACLDATA_PKT:
2781                         case HCI_SCODATA_PKT:
2782                                 kfree_skb(skb);
2783                                 continue;
2784                         }
2785                 }
2786
2787                 /* Process frame */
2788                 switch (bt_cb(skb)->pkt_type) {
2789                 case HCI_EVENT_PKT:
2790                         BT_DBG("%s Event packet", hdev->name);
2791                         hci_event_packet(hdev, skb);
2792                         break;
2793
2794                 case HCI_ACLDATA_PKT:
2795                         BT_DBG("%s ACL data packet", hdev->name);
2796                         hci_acldata_packet(hdev, skb);
2797                         break;
2798
2799                 case HCI_SCODATA_PKT:
2800                         BT_DBG("%s SCO data packet", hdev->name);
2801                         hci_scodata_packet(hdev, skb);
2802                         break;
2803
2804                 default:
2805                         kfree_skb(skb);
2806                         break;
2807                 }
2808         }
2809 }
2810
2811 static void hci_cmd_work(struct work_struct *work)
2812 {
2813         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2814         struct sk_buff *skb;
2815
2816         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2817
2818         /* Send queued commands */
2819         if (atomic_read(&hdev->cmd_cnt)) {
2820                 skb = skb_dequeue(&hdev->cmd_q);
2821                 if (!skb)
2822                         return;
2823
2824                 kfree_skb(hdev->sent_cmd);
2825
2826                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2827                 if (hdev->sent_cmd) {
2828                         atomic_dec(&hdev->cmd_cnt);
2829                         hci_send_frame(skb);
2830                         if (test_bit(HCI_RESET, &hdev->flags))
2831                                 del_timer(&hdev->cmd_timer);
2832                         else
2833                                 mod_timer(&hdev->cmd_timer,
2834                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2835                 } else {
2836                         skb_queue_head(&hdev->cmd_q, skb);
2837                         queue_work(hdev->workqueue, &hdev->cmd_work);
2838                 }
2839         }
2840 }
2841
2842 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2843 {
2844         /* General Inquiry Access Code (GIAC) 0x9e8b33, stored little-endian */
2845         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2846         struct hci_cp_inquiry cp;
2847
2848         BT_DBG("%s", hdev->name);
2849
2850         if (test_bit(HCI_INQUIRY, &hdev->flags))
2851                 return -EINPROGRESS;
2852
2853         inquiry_cache_flush(hdev);
2854
2855         memset(&cp, 0, sizeof(cp));
2856         memcpy(&cp.lap, lap, sizeof(cp.lap));
2857         cp.length  = length;
2858
2859         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2860 }
2861
2862 int hci_cancel_inquiry(struct hci_dev *hdev)
2863 {
2864         BT_DBG("%s", hdev->name);
2865
2866         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2867                 return -EALREADY;
2868
2869         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2870 }
2871
2872 u8 bdaddr_to_le(u8 bdaddr_type)
2873 {
2874         switch (bdaddr_type) {
2875         case BDADDR_LE_PUBLIC:
2876                 return ADDR_LE_DEV_PUBLIC;
2877
2878         default:
2879                 /* Fallback to LE Random address type */
2880                 return ADDR_LE_DEV_RANDOM;
2881         }
2882 }