
Bluetooth: fix hciconfig hciX up issue
[android-x86/kernel.git] / net / bluetooth / hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
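
/*
 * Editor's note (not part of the original file): callers hand
 * hci_request() a "req" callback that only queues HCI commands; the
 * command-complete path calls hci_req_complete(), which wakes the
 * waiter above. A typical synchronous call, as made by the HCISETSCAN
 * ioctl later in this file, looks roughly like:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */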

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;
                skb_queue_tail(&hdev->cmd_q, skb);
                hci_sched_cmd(hdev);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
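
/*
 * Editor's note on the two timeout values above: HCI page and connection
 * accept timeouts are expressed in baseband slots of 0.625 ms, so
 * 0x8000 = 32768 * 0.625 ms = 20.48 s and 0x7d00 = 32000 * 0.625 ms =
 * 20.0 s, matching the "~20 secs" comments.
 */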

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}
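
/*
 * Editor's note: the inquiry cache is a simple singly-linked list headed
 * at hdev->inq_cache.list; new entries are pushed at the head and the
 * whole list is freed by inquiry_cache_flush(). Both the entry and the
 * cache record a jiffies timestamp so hci_inquiry() below can decide
 * whether the cached results are fresh enough to reuse.
 */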

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and then
         * copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && hdev->rfkill->state == RFKILL_STATE_HARD_BLOCKED) {
                ret = -EBUSY;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, clean up */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
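
/*
 * Editor's note: hci_dev_open() is what backs "hciconfig hciX up". The
 * rfkill check above only refuses to bring the device up (with -EBUSY)
 * when the switch is hard-blocked; a soft-blocked radio can still be
 * opened. On init failure every tasklet is killed and the queues are
 * purged before hdev->close(), leaving the device in a clean, closed
 * state.
 */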

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
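
/*
 * Editor's note on HCISETACLMTU/HCISETSCOMTU above: dr.dev_opt is one
 * 32-bit value carrying two 16-bit fields, decoded by pointer
 * arithmetic. On a little-endian machine the packet count sits in the
 * low half and the MTU in the high half, i.e. roughly:
 *
 *	dev_opt = ((__u32) mtu << 16) | pkts;	(illustrative)
 *
 * Note the decoding is byte-order dependent as written.
 */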

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = hdev->type;
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, enum rfkill_state state)
{
        struct hci_dev *hdev = data;
        bool blocked = !(state == RFKILL_STATE_UNBLOCKED);

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
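
/*
 * Illustrative driver-side usage (editor's sketch, not part of this
 * file; the my_* callback names are hypothetical). A transport driver
 * typically does:
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() below rejects a device that lacks open, close or
 * destruct callbacks.
 */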

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
                                                hdev->type, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        init_MUTEX(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_allocate(&hdev->dev, RFKILL_TYPE_BLUETOOTH);
        if (hdev->rfkill) {
                hdev->rfkill->name = hdev->name;
                hdev->rfkill->toggle_radio = hci_rfkill_set_block;
                hdev->rfkill->data = hdev;
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_free(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);
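
/*
 * Editor's note: the registration loop above keeps hci_dev_list sorted
 * by id and reuses the first gap, so after hci0 and hci2 the next
 * device registered becomes hci1. On success the function returns the
 * assigned id.
 */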

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill)
                rfkill_unregister(hdev->rfkill);

        hci_unregister_sysfs(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
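
/*
 * Editor's note: __reassembly() maps the packet type to a per-type
 * reassembly slot; HCI_ACLDATA_PKT, HCI_SCODATA_PKT and HCI_EVENT_PKT
 * are 0x02..0x04, so "type - 2" indexes hdev->reassembly[0..2]. Each
 * partial frame parks its sk_buff in that slot with the number of
 * still-missing bytes stashed in skb->cb, until the frame completes
 * and is handed to hci_recv_frame().
 */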

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;
        skb_queue_tail(&hdev->cmd_q, skb);
        hci_sched_cmd(hdev);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_acl);
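
/*
 * Editor's note: a fragmented ACL write arrives here as one sk_buff
 * whose frag_list carries the remaining fragments. The head fragment
 * gets an ACL header flagged ACL_START, every continuation gets
 * ACL_CONT, and all of them are queued to conn->data_q under the
 * queue lock so the TX scheduler never sees a half-queued frame.
 */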

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        if (skb->len > hdev->sco_mtu) {
                kfree_skb(skb);
                return -EINVAL;
        }

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
        skb_queue_tail(&conn->data_q, skb);
        hci_sched_tx(hdev);
        return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
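
/*
 * Editor's note: hci_low_sent() picks, among connections of the given
 * type with queued data, the one with the fewest packets in flight
 * (c->sent), and grants it a quota of controller buffers: the free
 * buffer count divided by the number of eligible connections, but at
 * least 1. Repeated calls from the schedulers below approximate fair
 * sharing of the controller's ACL/SCO buffer pool.
 */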

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}
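
/*
 * Editor's note: each TX pass drains ACL first, then SCO, then eSCO,
 * and finally flushes raw_q unconditionally. ACL transmission is gated
 * on hdev->acl_cnt (controller buffer credits, refilled by Number of
 * Completed Packets events), and an ACL stall of more than 45 seconds
 * triggers hci_acl_tx_to(), which disconnects every connection with
 * unacknowledged packets.
 */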

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

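/*
 * Editor's note: command flow control. hdev->cmd_cnt is the number of
 * commands the controller will currently accept; hci_cmd_task sends at
 * most that many and keeps a clone of the last command in
 * hdev->sent_cmd so hci_sent_cmd_data() can correlate the eventual
 * Command Complete event with its parameters. If no credit arrives
 * within a second of the last transmission, the counter is forcibly
 * reset to 1 to unwedge the queue.
 */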
static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        hci_sched_cmd(hdev);
                }
        }
}