/*
 * Source: net/bluetooth/hci_core.c
 * From: android-x86/kernel.git, commit decd60198f313675d341c5ef0504dc1653fd220e
 * (retrieved via OSDN Git Service)
 */
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <net/sock.h>
46
47 #include <asm/system.h>
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53
54 #define AUTO_OFF_TIMEOUT 2000
55
56 static void hci_cmd_task(unsigned long arg);
57 static void hci_rx_task(unsigned long arg);
58 static void hci_tx_task(unsigned long arg);
59
60 static DEFINE_RWLOCK(hci_task_lock);
61
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
65
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
69
70 /* HCI protocols */
71 #define HCI_MAX_PROTO   2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77 /* ---- HCI notifications ---- */
78
/* Register a callback on the HCI notifier chain (device up/down/reg events). */
int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}
83
/* Remove a callback previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Broadcast @event for @hdev to all registered HCI notifier callbacks. */
static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94 /* ---- HCI requests ---- */
95
96 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
97 {
98         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
100         /* If this is the init phase check if the completed command matches
101          * the last init command, and if not just return.
102          */
103         if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
104                 return;
105
106         if (hdev->req_status == HCI_REQ_PEND) {
107                 hdev->req_result = result;
108                 hdev->req_status = HCI_REQ_DONE;
109                 wake_up_interruptible(&hdev->req_wait_q);
110         }
111 }
112
113 static void hci_req_cancel(struct hci_dev *hdev, int err)
114 {
115         BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117         if (hdev->req_status == HCI_REQ_PEND) {
118                 hdev->req_result = err;
119                 hdev->req_status = HCI_REQ_CANCELED;
120                 wake_up_interruptible(&hdev->req_wait_q);
121         }
122 }
123
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands) and sleeps up to
 * @timeout jiffies until hci_req_complete()/hci_req_cancel() flips
 * req_status and wakes us. Callers serialize via hci_req_lock(). */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        /* Register on the wait queue BEFORE firing the request so a
         * fast completion cannot be missed. */
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        /* NOTE(review): on a pending signal we return -EINTR without
         * resetting req_status, so it may stay HCI_REQ_PEND — confirm a
         * late completion cannot interfere with the next request. */
        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                /* req_result is an HCI status code; map it to an errno. */
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                /* req_result holds the positive errno from hci_req_cancel(). */
                err = -hdev->req_result;
                break;

        default:
                /* Timer expired and nobody completed the request. */
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
166
167 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
168                                         unsigned long opt, __u32 timeout)
169 {
170         int ret;
171
172         if (!test_bit(HCI_UP, &hdev->flags))
173                 return -ENETDOWN;
174
175         /* Serialize all requests */
176         hci_req_lock(hdev);
177         ret = __hci_request(hdev, req, opt, timeout);
178         hci_req_unlock(hdev);
179
180         return ret;
181 }
182
/* Request callback: issue HCI_Reset. HCI_RESET is set so the command
 * timeout handler knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
191
/* Request callback: queue the full controller bring-up command sequence.
 * Runs under HCI_INIT from hci_dev_open(); command order matters because
 * hci_req_complete() only finishes on the last init command. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands: drain driver-supplied vendor init commands
         * into the command queue ahead of the standard sequence. */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset (skipped for controllers that declare the no-reset quirk) */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                        set_bit(HCI_RESET, &hdev->flags);
                        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Drop all stored link keys; this is the last init command, so
         * its completion finishes the __hci_request() wait. */
        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
268
/* Request callback: LE-specific bring-up, run only for LE-capable
 * controllers (see lmp_le_capable() check in hci_dev_open()). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
276
277 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
278 {
279         __u8 scan = opt;
280
281         BT_DBG("%s %x", hdev->name, scan);
282
283         /* Inquiry and Page scans */
284         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
285 }
286
287 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
288 {
289         __u8 auth = opt;
290
291         BT_DBG("%s %x", hdev->name, auth);
292
293         /* Authentication */
294         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
295 }
296
297 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
298 {
299         __u8 encrypt = opt;
300
301         BT_DBG("%s %x", hdev->name, encrypt);
302
303         /* Encryption */
304         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
305 }
306
307 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
308 {
309         __le16 policy = cpu_to_le16(opt);
310
311         BT_DBG("%s %x", hdev->name, policy);
312
313         /* Default link policy */
314         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
315 }
316
317 /* Get HCI device by index.
318  * Device is held on return. */
319 struct hci_dev *hci_dev_get(int index)
320 {
321         struct hci_dev *hdev = NULL;
322         struct list_head *p;
323
324         BT_DBG("%d", index);
325
326         if (index < 0)
327                 return NULL;
328
329         read_lock(&hci_dev_list_lock);
330         list_for_each(p, &hci_dev_list) {
331                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
332                 if (d->id == index) {
333                         hdev = hci_dev_hold(d);
334                         break;
335                 }
336         }
337         read_unlock(&hci_dev_list_lock);
338         return hdev;
339 }
340
341 /* ---- Inquiry support ---- */
342 static void inquiry_cache_flush(struct hci_dev *hdev)
343 {
344         struct inquiry_cache *cache = &hdev->inq_cache;
345         struct inquiry_entry *next  = cache->list, *e;
346
347         BT_DBG("cache %p", cache);
348
349         cache->list = NULL;
350         while ((e = next)) {
351                 next = e->next;
352                 kfree(e);
353         }
354 }
355
356 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
357 {
358         struct inquiry_cache *cache = &hdev->inq_cache;
359         struct inquiry_entry *e;
360
361         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
362
363         for (e = cache->list; e; e = e->next)
364                 if (!bacmp(&e->data.bdaddr, bdaddr))
365                         break;
366         return e;
367 }
368
369 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
370 {
371         struct inquiry_cache *cache = &hdev->inq_cache;
372         struct inquiry_entry *ie;
373
374         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
375
376         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
377         if (!ie) {
378                 /* Entry not in the cache. Add new one. */
379                 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
380                 if (!ie)
381                         return;
382
383                 ie->next = cache->list;
384                 cache->list = ie;
385         }
386
387         memcpy(&ie->data, data, sizeof(*data));
388         ie->timestamp = jiffies;
389         cache->timestamp = jiffies;
390 }
391
392 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
393 {
394         struct inquiry_cache *cache = &hdev->inq_cache;
395         struct inquiry_info *info = (struct inquiry_info *) buf;
396         struct inquiry_entry *e;
397         int copied = 0;
398
399         for (e = cache->list; e && copied < num; e = e->next, copied++) {
400                 struct inquiry_data *data = &e->data;
401                 bacpy(&info->bdaddr, &data->bdaddr);
402                 info->pscan_rep_mode    = data->pscan_rep_mode;
403                 info->pscan_period_mode = data->pscan_period_mode;
404                 info->pscan_mode        = data->pscan_mode;
405                 memcpy(info->dev_class, data->dev_class, 3);
406                 info->clock_offset      = data->clock_offset;
407                 info++;
408         }
409
410         BT_DBG("cache %p, copied %d", cache, copied);
411         return copied;
412 }
413
414 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
415 {
416         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
417         struct hci_cp_inquiry cp;
418
419         BT_DBG("%s", hdev->name);
420
421         if (test_bit(HCI_INQUIRY, &hdev->flags))
422                 return;
423
424         /* Start Inquiry */
425         memcpy(&cp.lap, &ir->lap, 3);
426         cp.length  = ir->length;
427         cp.num_rsp = ir->num_rsp;
428         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
429 }
430
/* HCIINQUIRY ioctl backend: run (or reuse) an inquiry and copy the
 * resulting struct inquiry_info records back to user space.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -ENOMEM,
 * or whatever hci_request() propagates). */
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Only issue a fresh inquiry when the cache is stale, empty, or
         * the caller explicitly asked for a flush. */
        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        /* ir.length is in 1.28s units per spec; 2000ms per unit gives
         * generous headroom over the controller-side timeout. */
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* for unlimited number of responses we will use buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        /* Write back the (updated) request header first, then the
         * response records immediately after it. */
        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
496
497 /* ---- HCI ioctl helpers ---- */
498
/* Bring an HCI device up: open the driver, run the HCI init sequence
 * (unless the device is raw), and mark it HCI_UP.
 *
 * Returns 0 on success; -ENODEV, -ERFKILL, -EALREADY, -EIO, or an errno
 * propagated from the init requests on failure. */
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        /* Refuse to power on while rfkill blocks the radio. */
        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        /* Non-raw devices get the standard (and, if LE-capable, the LE)
         * initialization sequence under the HCI_INIT flag. */
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                /* Hold a reference for the lifetime of the UP state;
                 * dropped in hci_dev_do_close(). */
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags))
                        mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
581
/* Take an HCI device down: cancel pending requests, stop tasklets,
 * flush connections/queues, reset the controller (non-raw), and close
 * the driver. Teardown order matters — tasklets must be dead before
 * queues are purged, and queues empty before hdev->close(). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        /* Stop timer, it might be running */
        del_timer_sync(&hdev->cmd_timer);

        /* Already down — nothing to do. */
        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device (short 250ms timeout — best effort on the way down) */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        mgmt_powered(hdev->id, 0);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        /* Drop the reference taken by hci_dev_open() for the UP state. */
        hci_dev_put(hdev);
        return 0;
}
649
650 int hci_dev_close(__u16 dev)
651 {
652         struct hci_dev *hdev;
653         int err;
654
655         hdev = hci_dev_get(dev);
656         if (!hdev)
657                 return -ENODEV;
658         err = hci_dev_do_close(hdev);
659         hci_dev_put(hdev);
660         return err;
661 }
662
/* HCIDEVRESET ioctl backend: flush queues, connections and counters,
 * then issue an HCI_Reset to a running device. The TX tasklet is
 * disabled (not killed) so transmission pauses across the reset. */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        /* Resetting a down device is a silent no-op (returns 0). */
        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset flow-control counters before restarting traffic. */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
703
704 int hci_dev_reset_stat(__u16 dev)
705 {
706         struct hci_dev *hdev;
707         int ret = 0;
708
709         hdev = hci_dev_get(dev);
710         if (!hdev)
711                 return -ENODEV;
712
713         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
714
715         hci_dev_put(hdev);
716
717         return ret;
718 }
719
/* Dispatch the HCISET* ioctls: device-setting commands that either run
 * an HCI request (auth/encrypt/scan/linkpol) or update host-side state
 * directly (link mode, packet type, MTUs). */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                /* Host-side only: no HCI command is issued. */
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                /* NOTE(review): dev_opt is punned as two __u16 halves
                 * (index 1 = mtu, index 0 = pkts); the split is
                 * byte-order dependent — confirm against hcitool/user
                 * space usage. */
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
794
/* HCIGETDEVLIST ioctl backend: copy up to dev_num (id, flags) pairs for
 * all registered devices back to user space. The user-supplied count is
 * bounded to keep the kernel allocation small. */
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        /* Reject zero and anything that would need more than two pages. */
        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                /* A legacy-ioctl user is active: cancel any pending
                 * mgmt auto-power-off and mark the device pairable. */
                hci_del_off_timer(hdev);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        /* Shrink the copy to the number of devices actually found. */
        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
844
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * the requested device and copy it to user space. */
int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Legacy-ioctl user active: cancel mgmt auto-power-off and mark
         * the device pairable (same as hci_get_dev_list()). */
        hci_del_off_timer(hdev);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        /* NOTE(review): strcpy assumes hdev->name always fits di.name —
         * names are kernel-generated ("hci%d"), but confirm the buffer
         * sizes match. */
        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        /* Pack bus type in the low nibble, device type in the high one. */
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}
885
886 /* ---- Interface to HCI drivers ---- */
887
888 static int hci_rfkill_set_block(void *data, bool blocked)
889 {
890         struct hci_dev *hdev = data;
891
892         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
893
894         if (!blocked)
895                 return 0;
896
897         hci_dev_do_close(hdev);
898
899         return 0;
900 }
901
/* rfkill core callbacks: only block/unblock handling is implemented. */
static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
905
906 /* Alloc HCI device */
907 struct hci_dev *hci_alloc_dev(void)
908 {
909         struct hci_dev *hdev;
910
911         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
912         if (!hdev)
913                 return NULL;
914
915         skb_queue_head_init(&hdev->driver_init);
916
917         return hdev;
918 }
919 EXPORT_SYMBOL(hci_alloc_dev);
920
921 /* Free HCI device */
922 void hci_free_dev(struct hci_dev *hdev)
923 {
924         skb_queue_purge(&hdev->driver_init);
925
926         /* will free via device release */
927         put_device(&hdev->dev);
928 }
929 EXPORT_SYMBOL(hci_free_dev);
930
/* Workqueue handler: power the device on. If HCI_AUTO_OFF is set, arm
 * the auto-off timer so an unused device powers back down; once initial
 * setup completes, announce the new index to the mgmt interface. */
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}
947
/* Workqueue handler: power the device off (queued from hci_auto_off()). */
static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}
956
/* Auto-off timer callback: runs in timer (softirq) context, so the
 * actual close is deferred to the power_off work item. */
static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}
967
/* Cancel the pending auto-power-off: clear the flag and delete the
 * timer (del_timer, not _sync — callers may hold locks the callback
 * path also takes). */
void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}
975
976 int hci_uuids_clear(struct hci_dev *hdev)
977 {
978         struct list_head *p, *n;
979
980         list_for_each_safe(p, n, &hdev->uuids) {
981                 struct bt_uuid *uuid;
982
983                 uuid = list_entry(p, struct bt_uuid, list);
984
985                 list_del(p);
986                 kfree(uuid);
987         }
988
989         return 0;
990 }
991
992 int hci_link_keys_clear(struct hci_dev *hdev)
993 {
994         struct list_head *p, *n;
995
996         list_for_each_safe(p, n, &hdev->link_keys) {
997                 struct link_key *key;
998
999                 key = list_entry(p, struct link_key, list);
1000
1001                 list_del(p);
1002                 kfree(key);
1003         }
1004
1005         return 0;
1006 }
1007
1008 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1009 {
1010         struct list_head *p;
1011
1012         list_for_each(p, &hdev->link_keys) {
1013                 struct link_key *k;
1014
1015                 k = list_entry(p, struct link_key, list);
1016
1017                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1018                         return k;
1019         }
1020
1021         return NULL;
1022 }
1023
/* Store (or refresh) the link key for @bdaddr. When @new_key is set the
 * mgmt interface is informed of the new key.
 * Returns 0 on success, -ENOMEM if a new entry cannot be allocated. */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                                                u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                /* Refresh existing entry in place. */
                old_key_type = old_key->type;
                key = old_key;
        } else {
                /* 0xff = "no previous key" marker passed to mgmt. */
                old_key_type = 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->type = type;
        key->pin_len = pin_len;

        if (new_key)
                mgmt_new_key(hdev->id, key, old_key_type);

        /* NOTE(review): presumably 0x06 is the "changed combination
         * key" type, for which the original type is retained — confirm
         * against the HCI link key type definitions. */
        if (type == 0x06)
                key->type = old_key_type;

        return 0;
}
1057
1058 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1059 {
1060         struct link_key *key;
1061
1062         key = hci_find_link_key(hdev, bdaddr);
1063         if (!key)
1064                 return -ENOENT;
1065
1066         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1067
1068         list_del(&key->list);
1069         kfree(key);
1070
1071         return 0;
1072 }
1073
1074 /* HCI command timer function */
/* HCI command timer function */
/* Fires when a command got no completion in time: restore the command
 * credit, drop any in-flight reset flag, and re-kick the cmd tasklet
 * so queued commands can make progress again. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1084
1085 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1086                                                         bdaddr_t *bdaddr)
1087 {
1088         struct oob_data *data;
1089
1090         list_for_each_entry(data, &hdev->remote_oob_data, list)
1091                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1092                         return data;
1093
1094         return NULL;
1095 }
1096
1097 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1098 {
1099         struct oob_data *data;
1100
1101         data = hci_find_remote_oob_data(hdev, bdaddr);
1102         if (!data)
1103                 return -ENOENT;
1104
1105         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1106
1107         list_del(&data->list);
1108         kfree(data);
1109
1110         return 0;
1111 }
1112
1113 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1114 {
1115         struct oob_data *data, *n;
1116
1117         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1118                 list_del(&data->list);
1119                 kfree(data);
1120         }
1121
1122         return 0;
1123 }
1124
1125 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1126                                                                 u8 *randomizer)
1127 {
1128         struct oob_data *data;
1129
1130         data = hci_find_remote_oob_data(hdev, bdaddr);
1131
1132         if (!data) {
1133                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1134                 if (!data)
1135                         return -ENOMEM;
1136
1137                 bacpy(&data->bdaddr, bdaddr);
1138                 list_add(&data->list, &hdev->remote_oob_data);
1139         }
1140
1141         memcpy(data->hash, hash, sizeof(data->hash));
1142         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1143
1144         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1145
1146         return 0;
1147 }
1148
1149 /* Register HCI device */
/* Register a new HCI device with the core.
 *
 * Picks the lowest free id, names the device "hci<id>", initialises
 * all core state (tasklets, queues, timers, lists, work items) and
 * registers sysfs/rfkill.  The open/close/destruct driver callbacks
 * are mandatory.
 *
 * Returns the assigned id (>= 0) on success, -EINVAL on a missing
 * callback, or -ENOMEM if the workqueue cannot be created.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	/* Insert after the last consecutive id so the list stays sorted. */
	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill is best-effort: failure only leaves hdev->rfkill NULL. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Workqueue creation failed: take the device off the list again. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1256
1257 /* Unregister HCI device */
/* Unregister HCI device */
/* Tear down a registered device: unlink it, close it, free reassembly
 * buffers, notify management/rfkill/sysfs, flush stored state and drop
 * the core's reference.  Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if setup/init already finished. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Cancel the auto power-off timer before killing the workqueue. */
	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1302
1303 /* Suspend HCI device */
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to notifier listeners.  Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1310
1311 /* Resume HCI device */
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to notifier listeners.  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1318
1319 /* Receive frame from HCI drivers */
/* Receive frame from HCI drivers */
/* Entry point for driver RX: take ownership of @skb, stamp it, queue
 * it on hdev->rx_q and schedule the RX tasklet.  The frame is dropped
 * (and freed) with -ENXIO unless the device is up or initialising. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1342
/* Feed @count bytes of driver data into the partial packet kept in
 * hdev->reassembly[@index], allocating the skb on first use.
 *
 * @type must be HCI_ACLDATA_PKT..HCI_EVENT_PKT and @index a valid
 * slot, otherwise -EILSEQ.  While copying, scb->expect tracks how many
 * bytes are still needed; once the packet header is complete it is
 * reloaded with the payload length from that header.  When it reaches
 * zero the finished frame is handed to hci_recv_frame() (which takes
 * ownership of the skb).
 *
 * Returns the number of input bytes left unconsumed (>= 0), or a
 * negative error: -ENOMEM on allocation failure or when the advertised
 * payload would not fit the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst
		 * case of this packet type and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed?  Switch to expecting the payload
		 * length announced in the header. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1451
1452 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1453 {
1454         int rem = 0;
1455
1456         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1457                 return -EILSEQ;
1458
1459         while (count) {
1460                 rem = hci_reassembly(hdev, type, data, count,
1461                                                 type - 1, GFP_ATOMIC);
1462                 if (rem < 0)
1463                         return rem;
1464
1465                 data += (count - rem);
1466                 count = rem;
1467         };
1468
1469         return rem;
1470 }
1471 EXPORT_SYMBOL(hci_recv_fragment);
1472
1473 #define STREAM_REASSEMBLY 0
1474
1475 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1476 {
1477         int type;
1478         int rem = 0;
1479
1480         while (count) {
1481                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1482
1483                 if (!skb) {
1484                         struct { char type; } *pkt;
1485
1486                         /* Start of the frame */
1487                         pkt = data;
1488                         type = pkt->type;
1489
1490                         data++;
1491                         count--;
1492                 } else
1493                         type = bt_cb(skb)->pkt_type;
1494
1495                 rem = hci_reassembly(hdev, type, data,
1496                                         count, STREAM_REASSEMBLY, GFP_ATOMIC);
1497                 if (rem < 0)
1498                         return rem;
1499
1500                 data += (count - rem);
1501                 count = rem;
1502         };
1503
1504         return rem;
1505 }
1506 EXPORT_SYMBOL(hci_recv_stream_fragment);
1507
1508 /* ---- Interface to upper protocols ---- */
1509
1510 /* Register/Unregister protocols.
1511  * hci_task_lock is used to ensure that no tasks are running. */
1512 int hci_register_proto(struct hci_proto *hp)
1513 {
1514         int err = 0;
1515
1516         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1517
1518         if (hp->id >= HCI_MAX_PROTO)
1519                 return -EINVAL;
1520
1521         write_lock_bh(&hci_task_lock);
1522
1523         if (!hci_proto[hp->id])
1524                 hci_proto[hp->id] = hp;
1525         else
1526                 err = -EEXIST;
1527
1528         write_unlock_bh(&hci_task_lock);
1529
1530         return err;
1531 }
1532 EXPORT_SYMBOL(hci_register_proto);
1533
1534 int hci_unregister_proto(struct hci_proto *hp)
1535 {
1536         int err = 0;
1537
1538         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1539
1540         if (hp->id >= HCI_MAX_PROTO)
1541                 return -EINVAL;
1542
1543         write_lock_bh(&hci_task_lock);
1544
1545         if (hci_proto[hp->id])
1546                 hci_proto[hp->id] = NULL;
1547         else
1548                 err = -ENOENT;
1549
1550         write_unlock_bh(&hci_task_lock);
1551
1552         return err;
1553 }
1554 EXPORT_SYMBOL(hci_unregister_proto);
1555
/* Add @cb to the global HCI callback list (hci_cb_list).
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1567
/* Remove @cb from the global HCI callback list.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1579
/* Hand one outgoing frame to the transport driver.  If any HCI socket
 * is in promiscuous mode, a timestamped copy is delivered to the
 * sockets first.  Frees the skb and returns -ENODEV when skb->dev is
 * unset; otherwise returns the driver's send result. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1603
1604 /* Send HCI command */
/* Send HCI command */
/* Build an HCI command packet (little-endian @opcode header + @plen
 * bytes of @param) and queue it on hdev->cmd_q for the command
 * tasklet.  During device init the opcode is remembered in
 * init_last_cmd.  Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1639
1640 /* Get data from the previously sent command */
1641 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1642 {
1643         struct hci_command_hdr *hdr;
1644
1645         if (!hdev->sent_cmd)
1646                 return NULL;
1647
1648         hdr = (void *) hdev->sent_cmd->data;
1649
1650         if (hdr->opcode != cpu_to_le16(opcode))
1651                 return NULL;
1652
1653         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1654
1655         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1656 }
1657
1658 /* Send ACL data */
/* Send ACL data */
/* Prepend an ACL header to @skb: the 12-bit handle is packed with the
 * packet-boundary/broadcast @flags and stored little-endian, together
 * with the payload length. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1670
/* Queue an ACL frame on the connection's data queue and kick the TX
 * tasklet.  If the skb carries a frag_list, each fragment gets its own
 * ACL header: the head keeps the caller's @flags, continuations are
 * re-flagged ACL_CONT, and the whole chain is queued atomically under
 * the queue lock so fragments are never interleaved. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; each fragment is queued individually. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1719
1720 /* Send SCO data */
/* Send SCO data */
/* Prepend a SCO header (little-endian handle + length) to @skb, queue
 * it on the connection's data queue and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1742
1743 /* ---- HCI TX task (outgoing data) ---- */
1744
1745 /* HCI Connection scheduler */
/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest in-flight packets (a
 * fairness heuristic) and compute its send quota: the available
 * controller buffer credits divided evenly among the ready
 * connections, at least 1.  Returns NULL with *quote = 0 when no
 * connection of this type has queued data. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* NOTE(review): falls back to ACL credits when
			 * le_mtu is 0, while hci_sched_le() keys the same
			 * fallback off le_pkts — confirm these fields are
			 * always zero/non-zero together. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1800
/* TX timeout handling: disconnect every connection of @type that still
 * has unacknowledged packets (c->sent), since the controller appears
 * to have stopped returning credits.  0x13 is the disconnect reason
 * passed to hci_acl_disconn (remote user terminated connection, per
 * the Bluetooth error code table — confirm against the spec). */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
1819
/* Drain queued ACL data: repeatedly pick the least-busy ACL connection
 * via hci_low_sent() and send up to its quota while controller credits
 * (acl_cnt) remain.  Also fires the stalled-link recovery when no
 * credit has come back for 45 seconds. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Wake the link from sniff mode before sending. */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1849
1850 /* Schedule SCO */
/* Schedule SCO */
/* Drain queued SCO data while sco_cnt credits remain.  conn->sent
 * wraps to 0 at ~0 because SCO completions are not reported the way
 * ACL ones are — presumably to keep the fairness counter bounded. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1870
/* Drain queued eSCO data; identical to hci_sched_sco() except it
 * schedules ESCO_LINK connections (both share sco_cnt credits). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1890
/* Drain queued LE data.  Controllers without a dedicated LE buffer
 * pool (le_pkts == 0) share the ACL credits, so the consumed count is
 * written back to le_cnt or acl_cnt accordingly.  Stalled-link
 * recovery fires after 45 s without returned LE credits. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
1924
/* TX tasklet: run every per-type scheduler, then flush raw (unknown
 * type) packets straight to the driver.  hci_task_lock is held for
 * reading so protocol (de)registration cannot race with us. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1951
1952 /* ----- HCI RX task (incoming data proccessing) ----- */
1953
1954 /* ACL data packet */
/* ACL data packet */
/* RX path for one ACL packet: strip the header, unpack handle/flags,
 * look up the connection and pass the skb up to L2CAP.  The protocol
 * takes ownership of the skb; otherwise it is freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1993
1994 /* SCO data packet */
1995 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1996 {
1997         struct hci_sco_hdr *hdr = (void *) skb->data;
1998         struct hci_conn *conn;
1999         __u16 handle;
2000
2001         skb_pull(skb, HCI_SCO_HDR_SIZE);
2002
2003         handle = __le16_to_cpu(hdr->handle);
2004
2005         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2006
2007         hdev->stat.sco_rx++;
2008
2009         hci_dev_lock(hdev);
2010         conn = hci_conn_hash_lookup_handle(hdev, handle);
2011         hci_dev_unlock(hdev);
2012
2013         if (conn) {
2014                 register struct hci_proto *hp;
2015
2016                 /* Send to upper protocol */
2017                 hp = hci_proto[HCI_PROTO_SCO];
2018                 if (hp && hp->recv_scodata) {
2019                         hp->recv_scodata(conn, skb);
2020                         return;
2021                 }
2022         } else {
2023                 BT_ERR("%s SCO packet for unknown connection handle %d",
2024                         hdev->name, handle);
2025         }
2026
2027         kfree_skb(skb);
2028 }
2029
2030 static void hci_rx_task(unsigned long arg)
2031 {
2032         struct hci_dev *hdev = (struct hci_dev *) arg;
2033         struct sk_buff *skb;
2034
2035         BT_DBG("%s", hdev->name);
2036
2037         read_lock(&hci_task_lock);
2038
2039         while ((skb = skb_dequeue(&hdev->rx_q))) {
2040                 if (atomic_read(&hdev->promisc)) {
2041                         /* Send copy to the sockets */
2042                         hci_send_to_sock(hdev, skb, NULL);
2043                 }
2044
2045                 if (test_bit(HCI_RAW, &hdev->flags)) {
2046                         kfree_skb(skb);
2047                         continue;
2048                 }
2049
2050                 if (test_bit(HCI_INIT, &hdev->flags)) {
2051                         /* Don't process data packets in this states. */
2052                         switch (bt_cb(skb)->pkt_type) {
2053                         case HCI_ACLDATA_PKT:
2054                         case HCI_SCODATA_PKT:
2055                                 kfree_skb(skb);
2056                                 continue;
2057                         }
2058                 }
2059
2060                 /* Process frame */
2061                 switch (bt_cb(skb)->pkt_type) {
2062                 case HCI_EVENT_PKT:
2063                         hci_event_packet(hdev, skb);
2064                         break;
2065
2066                 case HCI_ACLDATA_PKT:
2067                         BT_DBG("%s ACL data packet", hdev->name);
2068                         hci_acldata_packet(hdev, skb);
2069                         break;
2070
2071                 case HCI_SCODATA_PKT:
2072                         BT_DBG("%s SCO data packet", hdev->name);
2073                         hci_scodata_packet(hdev, skb);
2074                         break;
2075
2076                 default:
2077                         kfree_skb(skb);
2078                         break;
2079                 }
2080         }
2081
2082         read_unlock(&hci_task_lock);
2083 }
2084
2085 static void hci_cmd_task(unsigned long arg)
2086 {
2087         struct hci_dev *hdev = (struct hci_dev *) arg;
2088         struct sk_buff *skb;
2089
2090         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2091
2092         /* Send queued commands */
2093         if (atomic_read(&hdev->cmd_cnt)) {
2094                 skb = skb_dequeue(&hdev->cmd_q);
2095                 if (!skb)
2096                         return;
2097
2098                 kfree_skb(hdev->sent_cmd);
2099
2100                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2101                 if (hdev->sent_cmd) {
2102                         atomic_dec(&hdev->cmd_cnt);
2103                         hci_send_frame(skb);
2104                         mod_timer(&hdev->cmd_timer,
2105                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2106                 } else {
2107                         skb_queue_head(&hdev->cmd_q, skb);
2108                         tasklet_schedule(&hdev->cmd_task);
2109                 }
2110         }
2111 }