net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_sync_lock(hdev);
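        /* Entering DUT mode has its own HCI command; the only way back
         * out of it is a full controller reset.
         */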
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_sync_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        bool enable;
        int err;

        err = kstrtobool_from_user(user_buf, count, &enable);
        if (err)
                return err;

        /* When the diagnostic flags are not persistent and the transport
         * is not active or in user channel operation, then there is no need
         * for the vendor callback. Instead just store the desired value and
         * the setting will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            (!test_bit(HCI_RUNNING, &hdev->flags) ||
             hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
                goto done;

        hci_req_sync_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_sync_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 slots * 0.625 ms = ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
                 */
                if (hdev->commands[0] & 0x20) {
                        events[0] |= 0x10; /* Disconnection Complete */
                        events[2] |= 0x04; /* Number of Completed Packets */
                        events[3] |= 0x02; /* Data Buffer Overflow */
                }

                /* If the controller supports the Read Remote Version
                 * Information command, enable the corresponding event.
                 */
                if (hdev->commands[2] & 0x80)
                        events[1] |= 0x08; /* Read Remote Version Information
                                            * Complete
                                            */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_ext_feat_capable(hdev))
                events[4] |= 0x04; /* Read Remote Extended Features Complete */

        if (lmp_esco_capable(hdev)) {
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
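                        /* With SSP disabled, clear any previously stored
                         * extended inquiry response data by writing an
                         * all-zero EIR.
                         */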
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }

        return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

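        /* Only send the command when the host LE setting would actually
         * change; otherwise the write is redundant.
         */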
        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        bool changed = false;

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
                changed = true;
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
                changed = true;
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
                events[2] |= 0x80;
                changed = true;
        }

        /* Some Broadcom based controllers indicate support for the Set
         * Event Mask Page 2 command, but then actually do not support it.
         * Since the default value is all bits set to zero, the command is
         * only required if the event mask has to be changed. In case no
         * change to the event mask is needed, skip this command.
         */
        if (changed)
                hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                            sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports Channel Selection Algorithm #2
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
                        events[2] |= 0x08;      /* LE Channel Selection
                                                 * Algorithm
                                                 */

                /* If the controller supports the LE Set Scan Enable command,
                 * enable the corresponding advertising report event.
                 */
                if (hdev->commands[26] & 0x08)
                        events[0] |= 0x02;      /* LE Advertising Report */

                /* If the controller supports the LE Create Connection
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[26] & 0x10)
                        events[0] |= 0x01;      /* LE Connection Complete */

                /* If the controller supports the LE Connection Update
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x04)
                        events[0] |= 0x04;      /* LE Connection Update
                                                 * Complete
                                                 */

                /* If the controller supports the LE Read Remote Used Features
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[27] & 0x20)
                        events[0] |= 0x08;      /* LE Read Remote Used
                                                 * Features Complete
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                /* If the controller supports the LE Set Default PHY or
                 * LE Set PHY commands, enable the corresponding event.
                 */
                if (hdev->commands[35] & (0x20 | 0x40))
                        events[1] |= 0x08;        /* LE PHY Update Complete */

                /* If the controller supports LE Set Extended Scan Parameters
                 * and LE Set Extended Scan Enable commands, enable the
                 * corresponding event.
                 */
                if (use_ext_scan(hdev))
                        events[1] |= 0x10;      /* LE Extended Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Extended Create Connection
                 * command, enable the corresponding event.
                 */
                if (use_ext_conn(hdev))
                        events[1] |= 0x02;      /* LE Enhanced Connection
                                                 * Complete
                                                 */

                /* If the controller supports the LE Extended Advertising
                 * command, enable the corresponding event.
                 */
                if (ext_adv_capable(hdev))
                        events[2] |= 0x02;      /* LE Advertising Set
                                                 * Terminated
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                /* Read LE Advertising Channel TX Power */
                if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
                        /* The HCI specification forbids mixing legacy and
                         * extended advertising commands, and Read Advertising
                         * Channel TX Power is a legacy command. So do not
                         * call it if extended advertising is supported,
                         * otherwise the controller will return
                         * COMMAND_DISALLOWED for the extended commands.
                         */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->commands[26] & 0x40) {
                        /* Read LE White List Size */
                        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[26] & 0x80) {
                        /* Clear LE White List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
                }

                if (hdev->commands[34] & 0x40) {
                        /* Read LE Resolving List Size */
                        hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                                    0, NULL);
                }

                if (hdev->commands[34] & 0x20) {
                        /* Clear LE Resolving List */
                        hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                if (ext_adv_capable(hdev)) {
                        /* Read LE Number of Supported Advertising Sets */
                        hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                                    0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send this command only if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }

        /* Set Suggested Default Data Length to maximum if supported */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                struct hci_cp_le_write_def_data_len cp;

                cp.tx_len = hdev->le_max_tx_len;
                cp.tx_time = hdev->le_max_tx_time;
                hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
        }

        /* Set Default PHY parameters if command is supported */
        if (hdev->commands[35] & 0x20) {
                struct hci_cp_le_set_default_phy cp;

                cp.all_phys = 0x00;
                cp.tx_phys = hdev->le_tx_def_phys;
                cp.rx_phys = hdev->le_rx_def_phys;

                hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
        }

        return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
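        /* Bring-up runs in up to four sequential stages; each later
         * stage can depend on capabilities discovered by the earlier
         * ones.
         */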
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_PRIMARY)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

        return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

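        /* Keep the resolve list sorted by ascending abs(RSSI) so that
         * entries with the strongest signal get their names resolved
         * first.
         */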
        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
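        /* A name that just became known takes the entry off the
         * unknown/resolve lists.
         */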
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

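        /* Inquiry_Length is specified in units of 1.28 seconds; budget
         * roughly 2 seconds per unit so the procedure has margin to
         * finish.
         */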
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore, allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_PRIMARY &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

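        /* Allow a single outstanding HCI command while the controller
         * is being initialized.
         */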
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP) ||
            test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                hci_adv_instances_set_rpa_expired(hdev, true);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hci_dev_test_flag(hdev, HCI_MGMT) &&
                    hdev->dev_type == HCI_PRIMARY) {
                        ret = __hci_req_hci_power_on(hdev);
                        mgmt_power_on(hdev, ret);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
1517                 hdev->flags &= BIT(HCI_RAW);
1518         }
1519
1520 done:
1521         hci_req_sync_unlock(hdev);
1522         return ret;
1523 }
1524
1525 /* ---- HCI ioctl helpers ---- */
1526
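/* Power on the HCI device identified by index @dev. This is the entry
 * point used for the HCIDEVUP ioctl; the actual bring-up is handled by
 * hci_dev_do_open(). Returns 0 on success or a negative error code.
 */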
1527 int hci_dev_open(__u16 dev)
1528 {
1529         struct hci_dev *hdev;
1530         int err;
1531
1532         hdev = hci_dev_get(dev);
1533         if (!hdev)
1534                 return -ENODEV;
1535
1536         /* Devices that are marked as unconfigured can only be powered
1537          * up as user channel. Trying to bring them up as normal devices
1538          * will result in a failure. Only user channel operation is
1539          * possible.
1540          *
1541          * When this function is called for a user channel, the flag
1542          * HCI_USER_CHANNEL will be set first before attempting to
1543          * open the device.
1544          */
1545         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1546             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1547                 err = -EOPNOTSUPP;
1548                 goto done;
1549         }
1550
1551         /* We need to ensure that no other power on/off work is pending
1552          * before proceeding to call hci_dev_do_open. This is
1553          * particularly important if the setup procedure has not yet
1554          * completed.
1555          */
1556         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1557                 cancel_delayed_work(&hdev->power_off);
1558
1559         /* After this call it is guaranteed that the setup procedure
1560          * has finished. This means that error conditions like RFKILL
1561          * or no valid public or static random address apply.
1562          */
1563         flush_workqueue(hdev->req_workqueue);
1564
1565         /* For controllers not using the management interface and that
1566          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1567          * so that pairing works for them. Once the management interface
1568          * is in use this bit will be cleared again and userspace has
1569          * to explicitly enable it.
1570          */
1571         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1572             !hci_dev_test_flag(hdev, HCI_MGMT))
1573                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1574
1575         err = hci_dev_do_open(hdev);
1576
1577 done:
1578         hci_dev_put(hdev);
1579         return err;
1580 }
1581
1582 /* This function requires the caller holds hdev->lock */
1583 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1584 {
1585         struct hci_conn_params *p;
1586
1587         list_for_each_entry(p, &hdev->le_conn_params, list) {
1588                 if (p->conn) {
1589                         hci_conn_drop(p->conn);
1590                         hci_conn_put(p->conn);
1591                         p->conn = NULL;
1592                 }
1593                 list_del_init(&p->action);
1594         }
1595
1596         BT_DBG("All LE pending actions cleared");
1597 }
1598
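/* Power down the controller and flush all pending work and queues.
 * This common close path is shared by hci_dev_close(), the power-off
 * work, rfkill blocking and the hardware error reset handler.
 */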
1599 int hci_dev_do_close(struct hci_dev *hdev)
1600 {
1601         bool auto_off;
1602
1603         BT_DBG("%s %p", hdev->name, hdev);
1604
1605         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1606             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1607             test_bit(HCI_UP, &hdev->flags)) {
1608                 /* Execute vendor specific shutdown routine */
1609                 if (hdev->shutdown)
1610                         hdev->shutdown(hdev);
1611         }
1612
1613         cancel_delayed_work(&hdev->power_off);
1614
1615         hci_request_cancel_all(hdev);
1616         hci_req_sync_lock(hdev);
1617
1618         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1619                 cancel_delayed_work_sync(&hdev->cmd_timer);
1620                 hci_req_sync_unlock(hdev);
1621                 return 0;
1622         }
1623
1624         hci_leds_update_powered(hdev, false);
1625
1626         /* Flush RX and TX works */
1627         flush_work(&hdev->tx_work);
1628         flush_work(&hdev->rx_work);
1629
1630         if (hdev->discov_timeout > 0) {
1631                 hdev->discov_timeout = 0;
1632                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1633                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1634         }
1635
1636         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1637                 cancel_delayed_work(&hdev->service_cache);
1638
1639         if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1640                 struct adv_info *adv_instance;
1641
1642                 cancel_delayed_work_sync(&hdev->rpa_expired);
1643
1644                 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1645                         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1646         }
1647
1648         /* Avoid potential lockdep warnings from the *_flush() calls by
1649          * ensuring the workqueue is empty up front.
1650          */
1651         drain_workqueue(hdev->workqueue);
1652
1653         hci_dev_lock(hdev);
1654
1655         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1656
1657         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1658
1659         if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1660             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1661             hci_dev_test_flag(hdev, HCI_MGMT))
1662                 __mgmt_power_off(hdev);
1663
1664         hci_inquiry_cache_flush(hdev);
1665         hci_pend_le_actions_clear(hdev);
1666         hci_conn_hash_flush(hdev);
1667         hci_dev_unlock(hdev);
1668
1669         smp_unregister(hdev);
1670
1671         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1672
1673         if (hdev->flush)
1674                 hdev->flush(hdev);
1675
1676         /* Reset device */
1677         skb_queue_purge(&hdev->cmd_q);
1678         atomic_set(&hdev->cmd_cnt, 1);
1679         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1680             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1681                 set_bit(HCI_INIT, &hdev->flags);
1682                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1683                 clear_bit(HCI_INIT, &hdev->flags);
1684         }
1685
1686         /* Flush cmd work */
1687         flush_work(&hdev->cmd_work);
1688
1689         /* Drop queues */
1690         skb_queue_purge(&hdev->rx_q);
1691         skb_queue_purge(&hdev->cmd_q);
1692         skb_queue_purge(&hdev->raw_q);
1693
1694         /* Drop last sent command */
1695         if (hdev->sent_cmd) {
1696                 cancel_delayed_work_sync(&hdev->cmd_timer);
1697                 kfree_skb(hdev->sent_cmd);
1698                 hdev->sent_cmd = NULL;
1699         }
1700
1701         clear_bit(HCI_RUNNING, &hdev->flags);
1702         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1703
1704         /* After this point our queues are empty
1705          * and no tasks are scheduled. */
1706         hdev->close(hdev);
1707
1708         /* Clear flags */
1709         hdev->flags &= BIT(HCI_RAW);
1710         hci_dev_clear_volatile_flags(hdev);
1711
1712         /* Controller radio is available but is currently powered down */
1713         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1714
1715         memset(hdev->eir, 0, sizeof(hdev->eir));
1716         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1717         bacpy(&hdev->random_addr, BDADDR_ANY);
1718
1719         hci_req_sync_unlock(hdev);
1720
1721         hci_dev_put(hdev);
1722         return 0;
1723 }
1724
1725 int hci_dev_close(__u16 dev)
1726 {
1727         struct hci_dev *hdev;
1728         int err;
1729
1730         hdev = hci_dev_get(dev);
1731         if (!hdev)
1732                 return -ENODEV;
1733
1734         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1735                 err = -EBUSY;
1736                 goto done;
1737         }
1738
1739         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1740                 cancel_delayed_work(&hdev->power_off);
1741
1742         err = hci_dev_do_close(hdev);
1743
1744 done:
1745         hci_dev_put(hdev);
1746         return err;
1747 }
1748
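/* Reset an HCI device: drop the pending queues, flush the inquiry
 * cache and the connection hash, then issue the HCI Reset command
 * synchronously.
 */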
1749 static int hci_dev_do_reset(struct hci_dev *hdev)
1750 {
1751         int ret;
1752
1753         BT_DBG("%s %p", hdev->name, hdev);
1754
1755         hci_req_sync_lock(hdev);
1756
1757         /* Drop queues */
1758         skb_queue_purge(&hdev->rx_q);
1759         skb_queue_purge(&hdev->cmd_q);
1760
1761         /* Avoid potential lockdep warnings from the *_flush() calls by
1762          * ensuring the workqueue is empty up front.
1763          */
1764         drain_workqueue(hdev->workqueue);
1765
1766         hci_dev_lock(hdev);
1767         hci_inquiry_cache_flush(hdev);
1768         hci_conn_hash_flush(hdev);
1769         hci_dev_unlock(hdev);
1770
1771         if (hdev->flush)
1772                 hdev->flush(hdev);
1773
1774         atomic_set(&hdev->cmd_cnt, 1);
1775         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1776
1777         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1778
1779         hci_req_sync_unlock(hdev);
1780         return ret;
1781 }
1782
1783 int hci_dev_reset(__u16 dev)
1784 {
1785         struct hci_dev *hdev;
1786         int err;
1787
1788         hdev = hci_dev_get(dev);
1789         if (!hdev)
1790                 return -ENODEV;
1791
1792         if (!test_bit(HCI_UP, &hdev->flags)) {
1793                 err = -ENETDOWN;
1794                 goto done;
1795         }
1796
1797         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1798                 err = -EBUSY;
1799                 goto done;
1800         }
1801
1802         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1803                 err = -EOPNOTSUPP;
1804                 goto done;
1805         }
1806
1807         err = hci_dev_do_reset(hdev);
1808
1809 done:
1810         hci_dev_put(hdev);
1811         return err;
1812 }
1813
1814 int hci_dev_reset_stat(__u16 dev)
1815 {
1816         struct hci_dev *hdev;
1817         int ret = 0;
1818
1819         hdev = hci_dev_get(dev);
1820         if (!hdev)
1821                 return -ENODEV;
1822
1823         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1824                 ret = -EBUSY;
1825                 goto done;
1826         }
1827
1828         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1829                 ret = -EOPNOTSUPP;
1830                 goto done;
1831         }
1832
1833         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1834
1835 done:
1836         hci_dev_put(hdev);
1837         return ret;
1838 }
1839
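/* Keep the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in sync with a
 * scan mode change that was made outside of mgmt (i.e. via the
 * HCISETSCAN ioctl) and notify mgmt of the new settings.
 */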
1840 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1841 {
1842         bool conn_changed, discov_changed;
1843
1844         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1845
1846         if ((scan & SCAN_PAGE))
1847                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1848                                                           HCI_CONNECTABLE);
1849         else
1850                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1851                                                            HCI_CONNECTABLE);
1852
1853         if ((scan & SCAN_INQUIRY)) {
1854                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1855                                                             HCI_DISCOVERABLE);
1856         } else {
1857                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1858                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1859                                                              HCI_DISCOVERABLE);
1860         }
1861
1862         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1863                 return;
1864
1865         if (conn_changed || discov_changed) {
1866                 /* In case this was disabled through mgmt */
1867                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1868
1869                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1870                         hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1871
1872                 mgmt_new_settings(hdev);
1873         }
1874 }
1875
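/* Handler for the legacy HCISET* device ioctls. These only apply to
 * configured BR/EDR capable primary controllers that are not in use
 * by a user channel.
 */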
1876 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1877 {
1878         struct hci_dev *hdev;
1879         struct hci_dev_req dr;
1880         int err = 0;
1881
1882         if (copy_from_user(&dr, arg, sizeof(dr)))
1883                 return -EFAULT;
1884
1885         hdev = hci_dev_get(dr.dev_id);
1886         if (!hdev)
1887                 return -ENODEV;
1888
1889         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1890                 err = -EBUSY;
1891                 goto done;
1892         }
1893
1894         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1895                 err = -EOPNOTSUPP;
1896                 goto done;
1897         }
1898
1899         if (hdev->dev_type != HCI_PRIMARY) {
1900                 err = -EOPNOTSUPP;
1901                 goto done;
1902         }
1903
1904         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1905                 err = -EOPNOTSUPP;
1906                 goto done;
1907         }
1908
1909         switch (cmd) {
1910         case HCISETAUTH:
1911                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1912                                    HCI_INIT_TIMEOUT, NULL);
1913                 break;
1914
1915         case HCISETENCRYPT:
1916                 if (!lmp_encrypt_capable(hdev)) {
1917                         err = -EOPNOTSUPP;
1918                         break;
1919                 }
1920
1921                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1922                         /* Auth must be enabled first */
1923                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1924                                            HCI_INIT_TIMEOUT, NULL);
1925                         if (err)
1926                                 break;
1927                 }
1928
1929                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1930                                    HCI_INIT_TIMEOUT, NULL);
1931                 break;
1932
1933         case HCISETSCAN:
1934                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1935                                    HCI_INIT_TIMEOUT, NULL);
1936
1937                 /* Ensure that the connectable and discoverable states
1938                  * get correctly modified as this was a non-mgmt change.
1939                  */
1940                 if (!err)
1941                         hci_update_scan_state(hdev, dr.dev_opt);
1942                 break;
1943
1944         case HCISETLINKPOL:
1945                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1946                                    HCI_INIT_TIMEOUT, NULL);
1947                 break;
1948
1949         case HCISETLINKMODE:
1950                 hdev->link_mode = ((__u16) dr.dev_opt) &
1951                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1952                 break;
1953
1954         case HCISETPTYPE:
1955                 if (hdev->pkt_type == (__u16) dr.dev_opt)
1956                         break;
1957
1958                 hdev->pkt_type = (__u16) dr.dev_opt;
1959                 mgmt_phy_configuration_changed(hdev, NULL);
1960                 break;
1961
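        /* For the MTU ioctls, dev_opt carries two 16-bit values in
         * host order: the second word holds the MTU and the first
         * word holds the packet count.
         */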
1962         case HCISETACLMTU:
1963                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1964                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1965                 break;
1966
1967         case HCISETSCOMTU:
1968                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1969                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1970                 break;
1971
1972         default:
1973                 err = -EINVAL;
1974                 break;
1975         }
1976
1977 done:
1978         hci_dev_put(hdev);
1979         return err;
1980 }
1981
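/* Copy the list of registered controllers and their flags to
 * userspace, returning at most the number of entries that the
 * caller asked for.
 */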
1982 int hci_get_dev_list(void __user *arg)
1983 {
1984         struct hci_dev *hdev;
1985         struct hci_dev_list_req *dl;
1986         struct hci_dev_req *dr;
1987         int n = 0, size, err;
1988         __u16 dev_num;
1989
1990         if (get_user(dev_num, (__u16 __user *) arg))
1991                 return -EFAULT;
1992
1993         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1994                 return -EINVAL;
1995
1996         size = sizeof(*dl) + dev_num * sizeof(*dr);
1997
1998         dl = kzalloc(size, GFP_KERNEL);
1999         if (!dl)
2000                 return -ENOMEM;
2001
2002         dr = dl->dev_req;
2003
2004         read_lock(&hci_dev_list_lock);
2005         list_for_each_entry(hdev, &hci_dev_list, list) {
2006                 unsigned long flags = hdev->flags;
2007
2008                 /* When the auto-off is configured it means the transport
2009                  * is running, but in that case still indicate that the
2010                  * device is actually down.
2011                  */
2012                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2013                         flags &= ~BIT(HCI_UP);
2014
2015                 (dr + n)->dev_id  = hdev->id;
2016                 (dr + n)->dev_opt = flags;
2017
2018                 if (++n >= dev_num)
2019                         break;
2020         }
2021         read_unlock(&hci_dev_list_lock);
2022
2023         dl->dev_num = n;
2024         size = sizeof(*dl) + n * sizeof(*dr);
2025
2026         err = copy_to_user(arg, dl, size);
2027         kfree(dl);
2028
2029         return err ? -EFAULT : 0;
2030 }
2031
2032 int hci_get_dev_info(void __user *arg)
2033 {
2034         struct hci_dev *hdev;
2035         struct hci_dev_info di;
2036         unsigned long flags;
2037         int err = 0;
2038
2039         if (copy_from_user(&di, arg, sizeof(di)))
2040                 return -EFAULT;
2041
2042         hdev = hci_dev_get(di.dev_id);
2043         if (!hdev)
2044                 return -ENODEV;
2045
2046         /* When the auto-off is configured it means the transport
2047          * is running, but in that case still indicate that the
2048          * device is actually down.
2049          */
2050         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2051                 flags = hdev->flags & ~BIT(HCI_UP);
2052         else
2053                 flags = hdev->flags;
2054
2055         strcpy(di.name, hdev->name);
2056         di.bdaddr   = hdev->bdaddr;
2057         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2058         di.flags    = flags;
2059         di.pkt_type = hdev->pkt_type;
2060         if (lmp_bredr_capable(hdev)) {
2061                 di.acl_mtu  = hdev->acl_mtu;
2062                 di.acl_pkts = hdev->acl_pkts;
2063                 di.sco_mtu  = hdev->sco_mtu;
2064                 di.sco_pkts = hdev->sco_pkts;
2065         } else {
2066                 di.acl_mtu  = hdev->le_mtu;
2067                 di.acl_pkts = hdev->le_pkts;
2068                 di.sco_mtu  = 0;
2069                 di.sco_pkts = 0;
2070         }
2071         di.link_policy = hdev->link_policy;
2072         di.link_mode   = hdev->link_mode;
2073
2074         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2075         memcpy(&di.features, &hdev->features, sizeof(di.features));
2076
2077         if (copy_to_user(arg, &di, sizeof(di)))
2078                 err = -EFAULT;
2079
2080         hci_dev_put(hdev);
2081
2082         return err;
2083 }
2084
2085 /* ---- Interface to HCI drivers ---- */
2086
2087 static int hci_rfkill_set_block(void *data, bool blocked)
2088 {
2089         struct hci_dev *hdev = data;
2090
2091         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2092
2093         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2094                 return -EBUSY;
2095
2096         if (blocked) {
2097                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2098                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2099                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2100                         hci_dev_do_close(hdev);
2101         } else {
2102                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2103         }
2104
2105         return 0;
2106 }
2107
2108 static const struct rfkill_ops hci_rfkill_ops = {
2109         .set_block = hci_rfkill_set_block,
2110 };
2111
2112 static void hci_power_on(struct work_struct *work)
2113 {
2114         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2115         int err;
2116
2117         BT_DBG("%s", hdev->name);
2118
2119         if (test_bit(HCI_UP, &hdev->flags) &&
2120             hci_dev_test_flag(hdev, HCI_MGMT) &&
2121             hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2122                 cancel_delayed_work(&hdev->power_off);
2123                 hci_req_sync_lock(hdev);
2124                 err = __hci_req_hci_power_on(hdev);
2125                 hci_req_sync_unlock(hdev);
2126                 mgmt_power_on(hdev, err);
2127                 return;
2128         }
2129
2130         err = hci_dev_do_open(hdev);
2131         if (err < 0) {
2132                 hci_dev_lock(hdev);
2133                 mgmt_set_powered_failed(hdev, err);
2134                 hci_dev_unlock(hdev);
2135                 return;
2136         }
2137
2138         /* During the HCI setup phase, a few error conditions are
2139          * ignored and they need to be checked now. If they are still
2140          * valid, it is important to turn the device back off.
2141          */
2142         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2143             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2144             (hdev->dev_type == HCI_PRIMARY &&
2145              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2146              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2147                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2148                 hci_dev_do_close(hdev);
2149         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2150                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2151                                    HCI_AUTO_OFF_TIMEOUT);
2152         }
2153
2154         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2155                 /* For unconfigured devices, set the HCI_RAW flag
2156                  * so that userspace can easily identify them.
2157                  */
2158                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2159                         set_bit(HCI_RAW, &hdev->flags);
2160
2161                 /* For fully configured devices, this will send
2162                  * the Index Added event. For unconfigured devices,
2163                  * it will send Unconfigued Index Added event.
2164                  * it will send the Unconfigured Index Added event.
2165                  *
2166                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2167                  * and no event will be sent.
2168                 mgmt_index_added(hdev);
2169         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2170                 /* Now that the controller is configured, it is
2171                  * important to clear the HCI_RAW flag.
2172                  */
2173                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2174                         clear_bit(HCI_RAW, &hdev->flags);
2175
2176                 /* Powering on the controller with HCI_CONFIG set only
2177                  * happens with the transition from unconfigured to
2178                  * configured. This will send the Index Added event.
2179                  */
2180                 mgmt_index_added(hdev);
2181         }
2182 }
2183
2184 static void hci_power_off(struct work_struct *work)
2185 {
2186         struct hci_dev *hdev = container_of(work, struct hci_dev,
2187                                             power_off.work);
2188
2189         BT_DBG("%s", hdev->name);
2190
2191         hci_dev_do_close(hdev);
2192 }
2193
2194 static void hci_error_reset(struct work_struct *work)
2195 {
2196         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2197
2198         BT_DBG("%s", hdev->name);
2199
2200         if (hdev->hw_error)
2201                 hdev->hw_error(hdev, hdev->hw_error_code);
2202         else
2203                 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2204
2205         if (hci_dev_do_close(hdev))
2206                 return;
2207
2208         hci_dev_do_open(hdev);
2209 }
2210
2211 void hci_uuids_clear(struct hci_dev *hdev)
2212 {
2213         struct bt_uuid *uuid, *tmp;
2214
2215         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2216                 list_del(&uuid->list);
2217                 kfree(uuid);
2218         }
2219 }
2220
2221 void hci_link_keys_clear(struct hci_dev *hdev)
2222 {
2223         struct link_key *key;
2224
2225         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2226                 list_del_rcu(&key->list);
2227                 kfree_rcu(key, rcu);
2228         }
2229 }
2230
2231 void hci_smp_ltks_clear(struct hci_dev *hdev)
2232 {
2233         struct smp_ltk *k;
2234
2235         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2236                 list_del_rcu(&k->list);
2237                 kfree_rcu(k, rcu);
2238         }
2239 }
2240
2241 void hci_smp_irks_clear(struct hci_dev *hdev)
2242 {
2243         struct smp_irk *k;
2244
2245         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2246                 list_del_rcu(&k->list);
2247                 kfree_rcu(k, rcu);
2248         }
2249 }
2250
2251 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2252 {
2253         struct link_key *k;
2254
2255         rcu_read_lock();
2256         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2257                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2258                         rcu_read_unlock();
2259                         return k;
2260                 }
2261         }
2262         rcu_read_unlock();
2263
2264         return NULL;
2265 }
2266
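/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and the authentication
 * requirements of the local and remote side.
 */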
2267 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2268                                u8 key_type, u8 old_key_type)
2269 {
2270         /* Legacy key */
2271         if (key_type < 0x03)
2272                 return true;
2273
2274         /* Debug keys are insecure so don't store them persistently */
2275         if (key_type == HCI_LK_DEBUG_COMBINATION)
2276                 return false;
2277
2278         /* Changed combination key and there's no previous one */
2279         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2280                 return false;
2281
2282         /* Security mode 3 case */
2283         if (!conn)
2284                 return true;
2285
2286         /* BR/EDR key derived using SC from an LE link */
2287         if (conn->type == LE_LINK)
2288                 return true;
2289
2290         /* Neither local nor remote side had no-bonding as requirement */
2291         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2292                 return true;
2293
2294         /* Local side had dedicated bonding as requirement */
2295         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2296                 return true;
2297
2298         /* Remote side had dedicated bonding as requirement */
2299         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2300                 return true;
2301
2302         /* If none of the above criteria match, then don't store the key
2303          * persistently */
2304         return false;
2305 }
2306
2307 static u8 ltk_role(u8 type)
2308 {
2309         if (type == SMP_LTK)
2310                 return HCI_ROLE_MASTER;
2311
2312         return HCI_ROLE_SLAVE;
2313 }
2314
2315 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2316                              u8 addr_type, u8 role)
2317 {
2318         struct smp_ltk *k;
2319
2320         rcu_read_lock();
2321         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2322                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2323                         continue;
2324
2325                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2326                         rcu_read_unlock();
2327                         return k;
2328                 }
2329         }
2330         rcu_read_unlock();
2331
2332         return NULL;
2333 }
2334
2335 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2336 {
2337         struct smp_irk *irk;
2338
2339         rcu_read_lock();
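        /* First check whether the RPA has already been resolved and
         * cached for one of the stored IRKs.
         */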
2340         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2341                 if (!bacmp(&irk->rpa, rpa)) {
2342                         rcu_read_unlock();
2343                         return irk;
2344                 }
2345         }
2346
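        /* Otherwise try to resolve the RPA against each stored IRK
         * and cache the result on a match.
         */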
2347         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2348                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2349                         bacpy(&irk->rpa, rpa);
2350                         rcu_read_unlock();
2351                         return irk;
2352                 }
2353         }
2354         rcu_read_unlock();
2355
2356         return NULL;
2357 }
2358
2359 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2360                                      u8 addr_type)
2361 {
2362         struct smp_irk *irk;
2363
2364         /* Identity Address must be public or static random */
2365         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2366                 return NULL;
2367
2368         rcu_read_lock();
2369         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2370                 if (addr_type == irk->addr_type &&
2371                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2372                         rcu_read_unlock();
2373                         return irk;
2374                 }
2375         }
2376         rcu_read_unlock();
2377
2378         return NULL;
2379 }
2380
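/* Add or update the stored BR/EDR link key for @bdaddr. An existing
 * entry is updated in place; otherwise a new one is allocated and
 * added to hdev->link_keys. When @persistent is non-NULL, it is set
 * to whether the key should be stored permanently (see
 * hci_persistent_key()).
 */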
2381 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2382                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2383                                   u8 pin_len, bool *persistent)
2384 {
2385         struct link_key *key, *old_key;
2386         u8 old_key_type;
2387
2388         old_key = hci_find_link_key(hdev, bdaddr);
2389         if (old_key) {
2390                 old_key_type = old_key->type;
2391                 key = old_key;
2392         } else {
2393                 old_key_type = conn ? conn->key_type : 0xff;
2394                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2395                 if (!key)
2396                         return NULL;
2397                 list_add_rcu(&key->list, &hdev->link_keys);
2398         }
2399
2400         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2401
2402         /* Some buggy controller combinations generate a changed
2403          * combination key for legacy pairing even when there's no
2404          * previous key */
2405         if (type == HCI_LK_CHANGED_COMBINATION &&
2406             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2407                 type = HCI_LK_COMBINATION;
2408                 if (conn)
2409                         conn->key_type = type;
2410         }
2411
2412         bacpy(&key->bdaddr, bdaddr);
2413         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2414         key->pin_len = pin_len;
2415
2416         if (type == HCI_LK_CHANGED_COMBINATION)
2417                 key->type = old_key_type;
2418         else
2419                 key->type = type;
2420
2421         if (persistent)
2422                 *persistent = hci_persistent_key(hdev, conn, type,
2423                                                  old_key_type);
2424
2425         return key;
2426 }
2427
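/* Add or update the long term key for @bdaddr. An existing entry
 * that matches the address, address type and role is updated in
 * place (SC keys match either role).
 */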
2428 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2429                             u8 addr_type, u8 type, u8 authenticated,
2430                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2431 {
2432         struct smp_ltk *key, *old_key;
2433         u8 role = ltk_role(type);
2434
2435         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2436         if (old_key)
2437                 key = old_key;
2438         else {
2439                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2440                 if (!key)
2441                         return NULL;
2442                 list_add_rcu(&key->list, &hdev->long_term_keys);
2443         }
2444
2445         bacpy(&key->bdaddr, bdaddr);
2446         key->bdaddr_type = addr_type;
2447         memcpy(key->val, tk, sizeof(key->val));
2448         key->authenticated = authenticated;
2449         key->ediv = ediv;
2450         key->rand = rand;
2451         key->enc_size = enc_size;
2452         key->type = type;
2453
2454         return key;
2455 }
2456
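/* Add or update the identity resolving key for @bdaddr, together
 * with the resolvable private address most recently associated
 * with it.
 */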
2457 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2458                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2459 {
2460         struct smp_irk *irk;
2461
2462         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2463         if (!irk) {
2464                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2465                 if (!irk)
2466                         return NULL;
2467
2468                 bacpy(&irk->bdaddr, bdaddr);
2469                 irk->addr_type = addr_type;
2470
2471                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2472         }
2473
2474         memcpy(irk->val, val, 16);
2475         bacpy(&irk->rpa, rpa);
2476
2477         return irk;
2478 }
2479
2480 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2481 {
2482         struct link_key *key;
2483
2484         key = hci_find_link_key(hdev, bdaddr);
2485         if (!key)
2486                 return -ENOENT;
2487
2488         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2489
2490         list_del_rcu(&key->list);
2491         kfree_rcu(key, rcu);
2492
2493         return 0;
2494 }
2495
2496 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2497 {
2498         struct smp_ltk *k;
2499         int removed = 0;
2500
2501         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2502                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2503                         continue;
2504
2505                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2506
2507                 list_del_rcu(&k->list);
2508                 kfree_rcu(k, rcu);
2509                 removed++;
2510         }
2511
2512         return removed ? 0 : -ENOENT;
2513 }
2514
2515 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2516 {
2517         struct smp_irk *k;
2518
2519         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2520                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2521                         continue;
2522
2523                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2524
2525                 list_del_rcu(&k->list);
2526                 kfree_rcu(k, rcu);
2527         }
2528 }
2529
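/* Check whether bonding data is stored for @bdaddr: a link key for
 * BR/EDR addresses, or a long term key for LE addresses (after
 * resolving the address through a matching IRK, if any).
 */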
2530 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2531 {
2532         struct smp_ltk *k;
2533         struct smp_irk *irk;
2534         u8 addr_type;
2535
2536         if (type == BDADDR_BREDR) {
2537                 if (hci_find_link_key(hdev, bdaddr))
2538                         return true;
2539                 return false;
2540         }
2541
2542         /* Convert to HCI addr type which struct smp_ltk uses */
2543         if (type == BDADDR_LE_PUBLIC)
2544                 addr_type = ADDR_LE_DEV_PUBLIC;
2545         else
2546                 addr_type = ADDR_LE_DEV_RANDOM;
2547
2548         irk = hci_get_irk(hdev, bdaddr, addr_type);
2549         if (irk) {
2550                 bdaddr = &irk->bdaddr;
2551                 addr_type = irk->addr_type;
2552         }
2553
2554         rcu_read_lock();
2555         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2556                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2557                         rcu_read_unlock();
2558                         return true;
2559                 }
2560         }
2561         rcu_read_unlock();
2562
2563         return false;
2564 }
2565
2566 /* HCI command timer function */
2567 static void hci_cmd_timeout(struct work_struct *work)
2568 {
2569         struct hci_dev *hdev = container_of(work, struct hci_dev,
2570                                             cmd_timer.work);
2571
2572         if (hdev->sent_cmd) {
2573                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2574                 u16 opcode = __le16_to_cpu(sent->opcode);
2575
2576                 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2577         } else {
2578                 bt_dev_err(hdev, "command tx timeout");
2579         }
2580
2581         atomic_set(&hdev->cmd_cnt, 1);
2582         queue_work(hdev->workqueue, &hdev->cmd_work);
2583 }
2584
2585 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2586                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2587 {
2588         struct oob_data *data;
2589
2590         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2591                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2592                         continue;
2593                 if (data->bdaddr_type != bdaddr_type)
2594                         continue;
2595                 return data;
2596         }
2597
2598         return NULL;
2599 }
2600
2601 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2602                                u8 bdaddr_type)
2603 {
2604         struct oob_data *data;
2605
2606         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2607         if (!data)
2608                 return -ENOENT;
2609
2610         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2611
2612         list_del(&data->list);
2613         kfree(data);
2614
2615         return 0;
2616 }
2617
2618 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2619 {
2620         struct oob_data *data, *n;
2621
2622         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2623                 list_del(&data->list);
2624                 kfree(data);
2625         }
2626 }
2627
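/* Add or update remote out-of-band pairing data for @bdaddr. The
 * data->present field encodes which value pairs are valid: 0x01 for
 * P-192 only, 0x02 for P-256 only and 0x03 for both.
 */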
2628 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2629                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2630                             u8 *hash256, u8 *rand256)
2631 {
2632         struct oob_data *data;
2633
2634         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2635         if (!data) {
2636                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2637                 if (!data)
2638                         return -ENOMEM;
2639
2640                 bacpy(&data->bdaddr, bdaddr);
2641                 data->bdaddr_type = bdaddr_type;
2642                 list_add(&data->list, &hdev->remote_oob_data);
2643         }
2644
2645         if (hash192 && rand192) {
2646                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2647                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2648                 if (hash256 && rand256)
2649                         data->present = 0x03;
2650         } else {
2651                 memset(data->hash192, 0, sizeof(data->hash192));
2652                 memset(data->rand192, 0, sizeof(data->rand192));
2653                 if (hash256 && rand256)
2654                         data->present = 0x02;
2655                 else
2656                         data->present = 0x00;
2657         }
2658
2659         if (hash256 && rand256) {
2660                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2661                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2662         } else {
2663                 memset(data->hash256, 0, sizeof(data->hash256));
2664                 memset(data->rand256, 0, sizeof(data->rand256));
2665                 if (hash192 && rand192)
2666                         data->present = 0x01;
2667         }
2668
2669         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2670
2671         return 0;
2672 }
2673
2674 /* This function requires the caller holds hdev->lock */
2675 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2676 {
2677         struct adv_info *adv_instance;
2678
2679         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2680                 if (adv_instance->instance == instance)
2681                         return adv_instance;
2682         }
2683
2684         return NULL;
2685 }
2686
2687 /* This function requires the caller holds hdev->lock */
2688 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2689 {
2690         struct adv_info *cur_instance;
2691
2692         cur_instance = hci_find_adv_instance(hdev, instance);
2693         if (!cur_instance)
2694                 return NULL;
2695
2696         if (cur_instance == list_last_entry(&hdev->adv_instances,
2697                                             struct adv_info, list))
2698                 return list_first_entry(&hdev->adv_instances,
2699                                         struct adv_info, list);
2700         else
2701                 return list_next_entry(cur_instance, list);
2702 }
2703
2704 /* This function requires the caller holds hdev->lock */
2705 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2706 {
2707         struct adv_info *adv_instance;
2708
2709         adv_instance = hci_find_adv_instance(hdev, instance);
2710         if (!adv_instance)
2711                 return -ENOENT;
2712
2713         BT_DBG("%s removing instance %d", hdev->name, instance);
2714
2715         if (hdev->cur_adv_instance == instance) {
2716                 if (hdev->adv_instance_timeout) {
2717                         cancel_delayed_work(&hdev->adv_instance_expire);
2718                         hdev->adv_instance_timeout = 0;
2719                 }
2720                 hdev->cur_adv_instance = 0x00;
2721         }
2722
2723         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2724
2725         list_del(&adv_instance->list);
2726         kfree(adv_instance);
2727
2728         hdev->adv_instance_cnt--;
2729
2730         return 0;
2731 }
2732
2733 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
2734 {
2735         struct adv_info *adv_instance, *n;
2736
2737         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
2738                 adv_instance->rpa_expired = rpa_expired;
2739 }
2740
2741 /* This function requires the caller holds hdev->lock */
2742 void hci_adv_instances_clear(struct hci_dev *hdev)
2743 {
2744         struct adv_info *adv_instance, *n;
2745
2746         if (hdev->adv_instance_timeout) {
2747                 cancel_delayed_work(&hdev->adv_instance_expire);
2748                 hdev->adv_instance_timeout = 0;
2749         }
2750
2751         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2752                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2753                 list_del(&adv_instance->list);
2754                 kfree(adv_instance);
2755         }
2756
2757         hdev->adv_instance_cnt = 0;
2758         hdev->cur_adv_instance = 0x00;
2759 }
2760
2761 static void adv_instance_rpa_expired(struct work_struct *work)
2762 {
2763         struct adv_info *adv_instance = container_of(work, struct adv_info,
2764                                                      rpa_expired_cb.work);
2765
2766         BT_DBG("");
2767
2768         adv_instance->rpa_expired = true;
2769 }
2770
2771 /* This function requires the caller holds hdev->lock */
2772 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2773                          u16 adv_data_len, u8 *adv_data,
2774                          u16 scan_rsp_len, u8 *scan_rsp_data,
2775                          u16 timeout, u16 duration)
2776 {
2777         struct adv_info *adv_instance;
2778
2779         adv_instance = hci_find_adv_instance(hdev, instance);
2780         if (adv_instance) {
2781                 memset(adv_instance->adv_data, 0,
2782                        sizeof(adv_instance->adv_data));
2783                 memset(adv_instance->scan_rsp_data, 0,
2784                        sizeof(adv_instance->scan_rsp_data));
2785         } else {
2786                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2787                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2788                         return -EOVERFLOW;
2789
2790                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2791                 if (!adv_instance)
2792                         return -ENOMEM;
2793
2794                 adv_instance->pending = true;
2795                 adv_instance->instance = instance;
2796                 list_add(&adv_instance->list, &hdev->adv_instances);
2797                 hdev->adv_instance_cnt++;
2798         }
2799
2800         adv_instance->flags = flags;
2801         adv_instance->adv_data_len = adv_data_len;
2802         adv_instance->scan_rsp_len = scan_rsp_len;
2803
2804         if (adv_data_len)
2805                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2806
2807         if (scan_rsp_len)
2808                 memcpy(adv_instance->scan_rsp_data,
2809                        scan_rsp_data, scan_rsp_len);
2810
2811         adv_instance->timeout = timeout;
2812         adv_instance->remaining_time = timeout;
2813
2814         if (duration == 0)
2815                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2816         else
2817                 adv_instance->duration = duration;
2818
2819         adv_instance->tx_power = HCI_TX_POWER_INVALID;
2820
2821         INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
2822                           adv_instance_rpa_expired);
2823
2824         BT_DBG("%s for instance %d", hdev->name, instance);
2825
2826         return 0;
2827 }
2828
2829 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2830                                          bdaddr_t *bdaddr, u8 type)
2831 {
2832         struct bdaddr_list *b;
2833
2834         list_for_each_entry(b, bdaddr_list, list) {
2835                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2836                         return b;
2837         }
2838
2839         return NULL;
2840 }
2841
2842 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2843                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2844                                 u8 type)
2845 {
2846         struct bdaddr_list_with_irk *b;
2847
2848         list_for_each_entry(b, bdaddr_list, list) {
2849                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2850                         return b;
2851         }
2852
2853         return NULL;
2854 }
2855
2856 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2857 {
2858         struct bdaddr_list *b, *n;
2859
2860         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2861                 list_del(&b->list);
2862                 kfree(b);
2863         }
2864 }
2865
2866 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2867 {
2868         struct bdaddr_list *entry;
2869
2870         if (!bacmp(bdaddr, BDADDR_ANY))
2871                 return -EBADF;
2872
2873         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2874                 return -EEXIST;
2875
2876         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2877         if (!entry)
2878                 return -ENOMEM;
2879
2880         bacpy(&entry->bdaddr, bdaddr);
2881         entry->bdaddr_type = type;
2882
2883         list_add(&entry->list, list);
2884
2885         return 0;
2886 }
2887
2888 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2889                                         u8 type, u8 *peer_irk, u8 *local_irk)
2890 {
2891         struct bdaddr_list_with_irk *entry;
2892
2893         if (!bacmp(bdaddr, BDADDR_ANY))
2894                 return -EBADF;
2895
2896         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2897                 return -EEXIST;
2898
2899         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2900         if (!entry)
2901                 return -ENOMEM;
2902
2903         bacpy(&entry->bdaddr, bdaddr);
2904         entry->bdaddr_type = type;
2905
2906         if (peer_irk)
2907                 memcpy(entry->peer_irk, peer_irk, 16);
2908
2909         if (local_irk)
2910                 memcpy(entry->local_irk, local_irk, 16);
2911
2912         list_add(&entry->list, list);
2913
2914         return 0;
2915 }
2916
2917 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2918 {
2919         struct bdaddr_list *entry;
2920
2921         if (!bacmp(bdaddr, BDADDR_ANY)) {
2922                 hci_bdaddr_list_clear(list);
2923                 return 0;
2924         }
2925
2926         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2927         if (!entry)
2928                 return -ENOENT;
2929
2930         list_del(&entry->list);
2931         kfree(entry);
2932
2933         return 0;
2934 }
2935
2936 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2937                                                         u8 type)
2938 {
2939         struct bdaddr_list_with_irk *entry;
2940
2941         if (!bacmp(bdaddr, BDADDR_ANY)) {
2942                 hci_bdaddr_list_clear(list);
2943                 return 0;
2944         }
2945
2946         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2947         if (!entry)
2948                 return -ENOENT;
2949
2950         list_del(&entry->list);
2951         kfree(entry);
2952
2953         return 0;
2954 }
2955
2956 /* This function requires the caller holds hdev->lock */
2957 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2958                                                bdaddr_t *addr, u8 addr_type)
2959 {
2960         struct hci_conn_params *params;
2961
2962         list_for_each_entry(params, &hdev->le_conn_params, list) {
2963                 if (bacmp(&params->addr, addr) == 0 &&
2964                     params->addr_type == addr_type) {
2965                         return params;
2966                 }
2967         }
2968
2969         return NULL;
2970 }
2971
2972 /* This function requires the caller holds hdev->lock */
2973 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2974                                                   bdaddr_t *addr, u8 addr_type)
2975 {
2976         struct hci_conn_params *param;
2977
2978         list_for_each_entry(param, list, action) {
2979                 if (bacmp(&param->addr, addr) == 0 &&
2980                     param->addr_type == addr_type)
2981                         return param;
2982         }
2983
2984         return NULL;
2985 }
2986
2987 /* This function requires the caller holds hdev->lock */
2988 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2989                                             bdaddr_t *addr, u8 addr_type)
2990 {
2991         struct hci_conn_params *params;
2992
2993         params = hci_conn_params_lookup(hdev, addr, addr_type);
2994         if (params)
2995                 return params;
2996
2997         params = kzalloc(sizeof(*params), GFP_KERNEL);
2998         if (!params) {
2999                 bt_dev_err(hdev, "out of memory");
3000                 return NULL;
3001         }
3002
3003         bacpy(&params->addr, addr);
3004         params->addr_type = addr_type;
3005
3006         list_add(&params->list, &hdev->le_conn_params);
3007         INIT_LIST_HEAD(&params->action);
3008
3009         params->conn_min_interval = hdev->le_conn_min_interval;
3010         params->conn_max_interval = hdev->le_conn_max_interval;
3011         params->conn_latency = hdev->le_conn_latency;
3012         params->supervision_timeout = hdev->le_supv_timeout;
3013         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3014
3015         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3016
3017         return params;
3018 }
3019
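/* This function requires the caller holds hdev->lock */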
3020 static void hci_conn_params_free(struct hci_conn_params *params)
3021 {
3022         if (params->conn) {
3023                 hci_conn_drop(params->conn);
3024                 hci_conn_put(params->conn);
3025         }
3026
3027         list_del(&params->action);
3028         list_del(&params->list);
3029         kfree(params);
3030 }
3031
3032 /* This function requires the caller holds hdev->lock */
3033 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3034 {
3035         struct hci_conn_params *params;
3036
3037         params = hci_conn_params_lookup(hdev, addr, addr_type);
3038         if (!params)
3039                 return;
3040
3041         hci_conn_params_free(params);
3042
3043         hci_update_background_scan(hdev);
3044
3045         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3046 }
3047
3048 /* This function requires the caller holds hdev->lock */
3049 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3050 {
3051         struct hci_conn_params *params, *tmp;
3052
3053         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3054                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3055                         continue;
3056
3057                 /* If trying to establish a one-time connection to a disabled
3058                  * device, leave the params, but mark them as just once.
3059                  */
3060                 if (params->explicit_connect) {
3061                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3062                         continue;
3063                 }
3064
3065                 list_del(&params->list);
3066                 kfree(params);
3067         }
3068
3069         BT_DBG("All LE disabled connection parameters were removed");
3070 }
3071
3072 /* This function requires the caller holds hdev->lock */
3073 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3074 {
3075         struct hci_conn_params *params, *tmp;
3076
3077         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3078                 hci_conn_params_free(params);
3079
3080         BT_DBG("All LE connection parameters were removed");
3081 }
3082
3083 /* Copy the Identity Address of the controller.
3084  *
3085  * If the controller has a public BD_ADDR, then by default use that one.
3086  * If this is an LE-only controller without a public address, default to
3087  * the static random address.
3088  *
3089  * For debugging purposes it is possible to force controllers with a
3090  * public address to use the static random address instead.
3091  *
3092  * In case BR/EDR has been disabled on a dual-mode controller and
3093  * userspace has configured a static address, then that address
3094  * becomes the identity address instead of the public BR/EDR address.
3095  */
3096 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3097                                u8 *bdaddr_type)
3098 {
3099         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3100             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3101             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3102              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3103                 bacpy(bdaddr, &hdev->static_addr);
3104                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3105         } else {
3106                 bacpy(bdaddr, &hdev->bdaddr);
3107                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3108         }
3109 }
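/* Illustrative sketch, not part of this file: a hypothetical caller
 * reading back whichever identity address the logic above selects.
 */
#if 0
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
        BT_DBG("identity %pMR (type %u)", &bdaddr, bdaddr_type);
}
#endif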
3110
3111 /* Alloc HCI device */
3112 struct hci_dev *hci_alloc_dev(void)
3113 {
3114         struct hci_dev *hdev;
3115
3116         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3117         if (!hdev)
3118                 return NULL;
3119
3120         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3121         hdev->esco_type = (ESCO_HV1);
3122         hdev->link_mode = (HCI_LM_ACCEPT);
3123         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3124         hdev->io_capability = 0x03;     /* No Input No Output */
3125         hdev->manufacturer = 0xffff;    /* Default to internal use */
3126         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3127         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3128         hdev->adv_instance_cnt = 0;
3129         hdev->cur_adv_instance = 0x00;
3130         hdev->adv_instance_timeout = 0;
3131
3132         hdev->sniff_max_interval = 800;
3133         hdev->sniff_min_interval = 80;
3134
3135         hdev->le_adv_channel_map = 0x07;
3136         hdev->le_adv_min_interval = 0x0800;
3137         hdev->le_adv_max_interval = 0x0800;
3138         hdev->le_scan_interval = 0x0060;
3139         hdev->le_scan_window = 0x0030;
3140         hdev->le_conn_min_interval = 0x0018;
3141         hdev->le_conn_max_interval = 0x0028;
3142         hdev->le_conn_latency = 0x0000;
3143         hdev->le_supv_timeout = 0x002a;
3144         hdev->le_def_tx_len = 0x001b;
3145         hdev->le_def_tx_time = 0x0148;
3146         hdev->le_max_tx_len = 0x001b;
3147         hdev->le_max_tx_time = 0x0148;
3148         hdev->le_max_rx_len = 0x001b;
3149         hdev->le_max_rx_time = 0x0148;
3150         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3151         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3152         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3153         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3154
3155         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3156         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3157         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3158         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3159
3160         mutex_init(&hdev->lock);
3161         mutex_init(&hdev->req_lock);
3162
3163         INIT_LIST_HEAD(&hdev->mgmt_pending);
3164         INIT_LIST_HEAD(&hdev->blacklist);
3165         INIT_LIST_HEAD(&hdev->whitelist);
3166         INIT_LIST_HEAD(&hdev->uuids);
3167         INIT_LIST_HEAD(&hdev->link_keys);
3168         INIT_LIST_HEAD(&hdev->long_term_keys);
3169         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3170         INIT_LIST_HEAD(&hdev->remote_oob_data);
3171         INIT_LIST_HEAD(&hdev->le_white_list);
3172         INIT_LIST_HEAD(&hdev->le_resolv_list);
3173         INIT_LIST_HEAD(&hdev->le_conn_params);
3174         INIT_LIST_HEAD(&hdev->pend_le_conns);
3175         INIT_LIST_HEAD(&hdev->pend_le_reports);
3176         INIT_LIST_HEAD(&hdev->conn_hash.list);
3177         INIT_LIST_HEAD(&hdev->adv_instances);
3178
3179         INIT_WORK(&hdev->rx_work, hci_rx_work);
3180         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3181         INIT_WORK(&hdev->tx_work, hci_tx_work);
3182         INIT_WORK(&hdev->power_on, hci_power_on);
3183         INIT_WORK(&hdev->error_reset, hci_error_reset);
3184
3185         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3186
3187         skb_queue_head_init(&hdev->rx_q);
3188         skb_queue_head_init(&hdev->cmd_q);
3189         skb_queue_head_init(&hdev->raw_q);
3190
3191         init_waitqueue_head(&hdev->req_wait_q);
3192
3193         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3194
3195         hci_request_setup(hdev);
3196
3197         hci_init_sysfs(hdev);
3198         discovery_init(hdev);
3199
3200         return hdev;
3201 }
3202 EXPORT_SYMBOL(hci_alloc_dev);
3203
3204 /* Free HCI device */
3205 void hci_free_dev(struct hci_dev *hdev)
3206 {
3207         /* will free via device release */
3208         put_device(&hdev->dev);
3209 }
3210 EXPORT_SYMBOL(hci_free_dev);
3211
3212 /* Register HCI device */
3213 int hci_register_dev(struct hci_dev *hdev)
3214 {
3215         int id, error;
3216
3217         if (!hdev->open || !hdev->close || !hdev->send)
3218                 return -EINVAL;
3219
3220         /* Do not allow HCI_AMP devices to register at index 0,
3221          * so the index can be used as the AMP controller ID.
3222          */
3223         switch (hdev->dev_type) {
3224         case HCI_PRIMARY:
3225                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3226                 break;
3227         case HCI_AMP:
3228                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3229                 break;
3230         default:
3231                 return -EINVAL;
3232         }
3233
3234         if (id < 0)
3235                 return id;
3236
3237         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3238         hdev->id = id;
3239
3240         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3241
3242         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3243         if (!hdev->workqueue) {
3244                 error = -ENOMEM;
3245                 goto err;
3246         }
3247
3248         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3249                                                       hdev->name);
3250         if (!hdev->req_workqueue) {
3251                 destroy_workqueue(hdev->workqueue);
3252                 error = -ENOMEM;
3253                 goto err;
3254         }
3255
3256         if (!IS_ERR_OR_NULL(bt_debugfs))
3257                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3258
3259         dev_set_name(&hdev->dev, "%s", hdev->name);
3260
3261         error = device_add(&hdev->dev);
3262         if (error < 0)
3263                 goto err_wqueue;
3264
3265         hci_leds_init(hdev);
3266
3267         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3268                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3269                                     hdev);
3270         if (hdev->rfkill) {
3271                 if (rfkill_register(hdev->rfkill) < 0) {
3272                         rfkill_destroy(hdev->rfkill);
3273                         hdev->rfkill = NULL;
3274                 }
3275         }
3276
3277         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3278                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3279
3280         hci_dev_set_flag(hdev, HCI_SETUP);
3281         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3282
3283         if (hdev->dev_type == HCI_PRIMARY) {
3284                 /* Assume BR/EDR support until proven otherwise (such as
3285                  * through reading supported features during init).
3286                  */
3287                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3288         }
3289
3290         write_lock(&hci_dev_list_lock);
3291         list_add(&hdev->list, &hci_dev_list);
3292         write_unlock(&hci_dev_list_lock);
3293
3294         /* Devices that are marked for raw-only usage are unconfigured
3295          * and should not be included in normal operation.
3296          */
3297         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3298                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3299
3300         hci_sock_dev_event(hdev, HCI_DEV_REG);
3301         hci_dev_hold(hdev);
3302
3303         queue_work(hdev->req_workqueue, &hdev->power_on);
3304
3305         return id;
3306
3307 err_wqueue:
3308         destroy_workqueue(hdev->workqueue);
3309         destroy_workqueue(hdev->req_workqueue);
3310 err:
3311         ida_simple_remove(&hci_index_ida, hdev->id);
3312
3313         return error;
3314 }
3315 EXPORT_SYMBOL(hci_register_dev);
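/* Illustrative sketch, not part of this file: the minimal driver-side
 * sequence for registering a controller, modelled on how transport
 * drivers use this API. All example_* names are hypothetical; a real
 * driver would wire the callbacks to its hardware.
 */
#if 0
static int example_open(struct hci_dev *hdev)  { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb); /* a real driver hands the skb to its transport */
        return 0;
}

static int example_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_VIRTUAL;
        hdev->open  = example_open;
        hdev->close = example_close;
        hdev->send  = example_send;

        err = hci_register_dev(hdev);
        if (err < 0)
                hci_free_dev(hdev);

        return err;
}
#endif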
3316
3317 /* Unregister HCI device */
3318 void hci_unregister_dev(struct hci_dev *hdev)
3319 {
3320         int id;
3321
3322         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3323
3324         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3325
3326         id = hdev->id;
3327
3328         write_lock(&hci_dev_list_lock);
3329         list_del(&hdev->list);
3330         write_unlock(&hci_dev_list_lock);
3331
3332         cancel_work_sync(&hdev->power_on);
3333
3334         hci_dev_do_close(hdev);
3335
3336         if (!test_bit(HCI_INIT, &hdev->flags) &&
3337             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3338             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3339                 hci_dev_lock(hdev);
3340                 mgmt_index_removed(hdev);
3341                 hci_dev_unlock(hdev);
3342         }
3343
3344         /* mgmt_index_removed should take care of emptying the
3345          * pending list */
3346         BUG_ON(!list_empty(&hdev->mgmt_pending));
3347
3348         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3349
3350         if (hdev->rfkill) {
3351                 rfkill_unregister(hdev->rfkill);
3352                 rfkill_destroy(hdev->rfkill);
3353         }
3354
3355         device_del(&hdev->dev);
3356
3357         debugfs_remove_recursive(hdev->debugfs);
3358         kfree_const(hdev->hw_info);
3359         kfree_const(hdev->fw_info);
3360
3361         destroy_workqueue(hdev->workqueue);
3362         destroy_workqueue(hdev->req_workqueue);
3363
3364         hci_dev_lock(hdev);
3365         hci_bdaddr_list_clear(&hdev->blacklist);
3366         hci_bdaddr_list_clear(&hdev->whitelist);
3367         hci_uuids_clear(hdev);
3368         hci_link_keys_clear(hdev);
3369         hci_smp_ltks_clear(hdev);
3370         hci_smp_irks_clear(hdev);
3371         hci_remote_oob_data_clear(hdev);
3372         hci_adv_instances_clear(hdev);
3373         hci_bdaddr_list_clear(&hdev->le_white_list);
3374         hci_bdaddr_list_clear(&hdev->le_resolv_list);
3375         hci_conn_params_clear_all(hdev);
3376         hci_discovery_filter_clear(hdev);
3377         hci_dev_unlock(hdev);
3378
3379         hci_dev_put(hdev);
3380
3381         ida_simple_remove(&hci_index_ida, id);
3382 }
3383 EXPORT_SYMBOL(hci_unregister_dev);
3384
3385 /* Suspend HCI device */
3386 int hci_suspend_dev(struct hci_dev *hdev)
3387 {
3388         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3389         return 0;
3390 }
3391 EXPORT_SYMBOL(hci_suspend_dev);
3392
3393 /* Resume HCI device */
3394 int hci_resume_dev(struct hci_dev *hdev)
3395 {
3396         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3397         return 0;
3398 }
3399 EXPORT_SYMBOL(hci_resume_dev);
3400
3401 /* Reset HCI device */
3402 int hci_reset_dev(struct hci_dev *hdev)
3403 {
3404         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3405         struct sk_buff *skb;
3406
3407         skb = bt_skb_alloc(3, GFP_ATOMIC);
3408         if (!skb)
3409                 return -ENOMEM;
3410
3411         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3412         skb_put_data(skb, hw_err, 3);
3413
3414         /* Send Hardware Error to upper stack */
3415         return hci_recv_frame(hdev, skb);
3416 }
3417 EXPORT_SYMBOL(hci_reset_dev);
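/* Illustrative sketch, not part of this file: a driver that detects a
 * wedged controller can inject the Hardware Error event built above and
 * let the core handle the reset. The handler name is hypothetical.
 */
#if 0
static void example_on_transport_stall(struct hci_dev *hdev)
{
        int err = hci_reset_dev(hdev);

        if (err)
                bt_dev_err(hdev, "failed to inject hw error (%d)", err);
}
#endif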
3418
3419 /* Receive frame from HCI drivers */
3420 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3421 {
3422         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3423                       !test_bit(HCI_INIT, &hdev->flags))) {
3424                 kfree_skb(skb);
3425                 return -ENXIO;
3426         }
3427
3428         if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3429             hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3430             hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3431                 kfree_skb(skb);
3432                 return -EINVAL;
3433         }
3434
3435         /* Incoming skb */
3436         bt_cb(skb)->incoming = 1;
3437
3438         /* Time stamp */
3439         __net_timestamp(skb);
3440
3441         skb_queue_tail(&hdev->rx_q, skb);
3442         queue_work(hdev->workqueue, &hdev->rx_work);
3443
3444         return 0;
3445 }
3446 EXPORT_SYMBOL(hci_recv_frame);
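/* Illustrative sketch, not part of this file: a driver delivering a
 * complete HCI event to the core. Hypothetical helper; the buffer is
 * assumed to hold a raw event without the H:4 packet-type byte.
 */
#if 0
static int example_deliver_event(struct hci_dev *hdev, const void *data,
                                 size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, data, len);

        return hci_recv_frame(hdev, skb);
}
#endif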
3447
3448 /* Receive diagnostic message from HCI drivers */
3449 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3450 {
3451         /* Mark as diagnostic packet */
3452         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3453
3454         /* Time stamp */
3455         __net_timestamp(skb);
3456
3457         skb_queue_tail(&hdev->rx_q, skb);
3458         queue_work(hdev->workqueue, &hdev->rx_work);
3459
3460         return 0;
3461 }
3462 EXPORT_SYMBOL(hci_recv_diag);
3463
3464 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3465 {
3466         va_list vargs;
3467
3468         va_start(vargs, fmt);
3469         kfree_const(hdev->hw_info);
3470         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3471         va_end(vargs);
3472 }
3473 EXPORT_SYMBOL(hci_set_hw_info);
3474
3475 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3476 {
3477         va_list vargs;
3478
3479         va_start(vargs, fmt);
3480         kfree_const(hdev->fw_info);
3481         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3482         va_end(vargs);
3483 }
3484 EXPORT_SYMBOL(hci_set_fw_info);
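/* Illustrative sketch, not part of this file: drivers typically record
 * hardware and firmware revision strings once setup has read them; the
 * values below are made up.
 */
#if 0
static void example_record_versions(struct hci_dev *hdev)
{
        hci_set_hw_info(hdev, "rev %u", 2);
        hci_set_fw_info(hdev, "build %u.%u", 1, 7);
}
#endif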
3485
3486 /* ---- Interface to upper protocols ---- */
3487
3488 int hci_register_cb(struct hci_cb *cb)
3489 {
3490         BT_DBG("%p name %s", cb, cb->name);
3491
3492         mutex_lock(&hci_cb_list_lock);
3493         list_add_tail(&cb->list, &hci_cb_list);
3494         mutex_unlock(&hci_cb_list_lock);
3495
3496         return 0;
3497 }
3498 EXPORT_SYMBOL(hci_register_cb);
3499
3500 int hci_unregister_cb(struct hci_cb *cb)
3501 {
3502         BT_DBG("%p name %s", cb, cb->name);
3503
3504         mutex_lock(&hci_cb_list_lock);
3505         list_del(&cb->list);
3506         mutex_unlock(&hci_cb_list_lock);
3507
3508         return 0;
3509 }
3510 EXPORT_SYMBOL(hci_unregister_cb);
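/* Illustrative sketch, not part of this file: how an upper protocol can
 * hook connection events, modelled on the L2CAP/SCO callback blocks.
 * The example names are hypothetical.
 */
#if 0
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
        .name        = "example",
        .connect_cfm = example_connect_cfm,
};

/* hci_register_cb(&example_cb) on init, hci_unregister_cb(&example_cb)
 * on exit.
 */
#endif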
3511
3512 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3513 {
3514         int err;
3515
3516         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3517                skb->len);
3518
3519         /* Time stamp */
3520         __net_timestamp(skb);
3521
3522         /* Send copy to monitor */
3523         hci_send_to_monitor(hdev, skb);
3524
3525         if (atomic_read(&hdev->promisc)) {
3526                 /* Send copy to the sockets */
3527                 hci_send_to_sock(hdev, skb);
3528         }
3529
3530         /* Get rid of skb owner, prior to sending to the driver. */
3531         skb_orphan(skb);
3532
3533         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3534                 kfree_skb(skb);
3535                 return;
3536         }
3537
3538         err = hdev->send(hdev, skb);
3539         if (err < 0) {
3540                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3541                 kfree_skb(skb);
3542         }
3543 }
3544
3545 /* Send HCI command */
3546 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3547                  const void *param)
3548 {
3549         struct sk_buff *skb;
3550
3551         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3552
3553         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3554         if (!skb) {
3555                 bt_dev_err(hdev, "no memory for command");
3556                 return -ENOMEM;
3557         }
3558
3559         /* Stand-alone HCI commands must be flagged as
3560          * single-command requests.
3561          */
3562         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3563
3564         skb_queue_tail(&hdev->cmd_q, skb);
3565         queue_work(hdev->workqueue, &hdev->cmd_work);
3566
3567         return 0;
3568 }
3569
3570 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3571                    const void *param)
3572 {
3573         struct sk_buff *skb;
3574
3575         if (hci_opcode_ogf(opcode) != 0x3f) {
3576                 /* A controller receiving a command shall respond with either
3577                  * a Command Status Event or a Command Complete Event.
3578                  * Therefore, all standard HCI commands must be sent via the
3579                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3580                  * Some vendors do not comply with this rule for vendor-specific
3581                  * commands and do not return any event. We want to support
3582                  * unresponded commands for such cases only.
3583                  */
3584                 bt_dev_err(hdev, "unresponded command not supported");
3585                 return -EINVAL;
3586         }
3587
3588         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3589         if (!skb) {
3590                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3591                            opcode);
3592                 return -ENOMEM;
3593         }
3594
3595         hci_send_frame(hdev, skb);
3596
3597         return 0;
3598 }
3599 EXPORT_SYMBOL(__hci_cmd_send);
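/* Illustrative sketch, not part of this file: __hci_cmd_send() is only
 * for vendor commands (OGF 0x3f) that generate no completion event. The
 * opcode and payload below are made up.
 */
#if 0
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        const u8 param[] = { 0x01 };

        /* opcodes in the 0xfc00 range have OGF 0x3f (vendor specific) */
        return __hci_cmd_send(hdev, 0xfc01, sizeof(param), param);
}
#endif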
3600
3601 /* Get data from the previously sent command */
3602 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3603 {
3604         struct hci_command_hdr *hdr;
3605
3606         if (!hdev->sent_cmd)
3607                 return NULL;
3608
3609         hdr = (void *) hdev->sent_cmd->data;
3610
3611         if (hdr->opcode != cpu_to_le16(opcode))
3612                 return NULL;
3613
3614         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3615
3616         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3617 }
3618
3619 /* Send HCI command and wait for Command Complete event */
3620 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3621                              const void *param, u32 timeout)
3622 {
3623         struct sk_buff *skb;
3624
3625         if (!test_bit(HCI_UP, &hdev->flags))
3626                 return ERR_PTR(-ENETDOWN);
3627
3628         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3629
3630         hci_req_sync_lock(hdev);
3631         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3632         hci_req_sync_unlock(hdev);
3633
3634         return skb;
3635 }
3636 EXPORT_SYMBOL(hci_cmd_sync);
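/* Illustrative sketch, not part of this file: issuing a command and
 * consuming the Command Complete parameters, using the standard Read
 * Local Version Information opcode and the ERR_PTR convention above.
 */
#if 0
static int example_read_local_version(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                           HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* skb->data now holds the return parameters */
        kfree_skb(skb);
        return 0;
}
#endif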
3637
3638 /* Send ACL data */
3639 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3640 {
3641         struct hci_acl_hdr *hdr;
3642         int len = skb->len;
3643
3644         skb_push(skb, HCI_ACL_HDR_SIZE);
3645         skb_reset_transport_header(skb);
3646         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3647         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3648         hdr->dlen   = cpu_to_le16(len);
3649 }
3650
3651 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3652                           struct sk_buff *skb, __u16 flags)
3653 {
3654         struct hci_conn *conn = chan->conn;
3655         struct hci_dev *hdev = conn->hdev;
3656         struct sk_buff *list;
3657
3658         skb->len = skb_headlen(skb);
3659         skb->data_len = 0;
3660
3661         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3662
3663         switch (hdev->dev_type) {
3664         case HCI_PRIMARY:
3665                 hci_add_acl_hdr(skb, conn->handle, flags);
3666                 break;
3667         case HCI_AMP:
3668                 hci_add_acl_hdr(skb, chan->handle, flags);
3669                 break;
3670         default:
3671                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3672                 return;
3673         }
3674
3675         list = skb_shinfo(skb)->frag_list;
3676         if (!list) {
3677                 /* Non fragmented */
3678                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3679
3680                 skb_queue_tail(queue, skb);
3681         } else {
3682                 /* Fragmented */
3683                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3684
3685                 skb_shinfo(skb)->frag_list = NULL;
3686
3687                 /* Queue all fragments atomically. We need to use spin_lock_bh
3688                  * here because of 6LoWPAN links: there this function is called
3689                  * from softirq context, and using a normal spin lock could
3690                  * cause deadlocks.
3691                  */
3692                 spin_lock_bh(&queue->lock);
3693
3694                 __skb_queue_tail(queue, skb);
3695
3696                 flags &= ~ACL_START;
3697                 flags |= ACL_CONT;
3698                 do {
3699                         skb = list; list = list->next;
3700
3701                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3702                         hci_add_acl_hdr(skb, conn->handle, flags);
3703
3704                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3705
3706                         __skb_queue_tail(queue, skb);
3707                 } while (list);
3708
3709                 spin_unlock_bh(&queue->lock);
3710         }
3711 }
3712
3713 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3714 {
3715         struct hci_dev *hdev = chan->conn->hdev;
3716
3717         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3718
3719         hci_queue_acl(chan, &chan->data_q, skb, flags);
3720
3721         queue_work(hdev->workqueue, &hdev->tx_work);
3722 }
3723
3724 /* Send SCO data */
3725 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3726 {
3727         struct hci_dev *hdev = conn->hdev;
3728         struct hci_sco_hdr hdr;
3729
3730         BT_DBG("%s len %d", hdev->name, skb->len);
3731
3732         hdr.handle = cpu_to_le16(conn->handle);
3733         hdr.dlen   = skb->len;
3734
3735         skb_push(skb, HCI_SCO_HDR_SIZE);
3736         skb_reset_transport_header(skb);
3737         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3738
3739         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3740
3741         skb_queue_tail(&conn->data_q, skb);
3742         queue_work(hdev->workqueue, &hdev->tx_work);
3743 }
3744
3745 /* ---- HCI TX task (outgoing data) ---- */
3746
3747 /* HCI Connection scheduler */
3748 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3749                                      int *quote)
3750 {
3751         struct hci_conn_hash *h = &hdev->conn_hash;
3752         struct hci_conn *conn = NULL, *c;
3753         unsigned int num = 0, min = ~0;
3754
3755         /* We don't have to lock the device here. Connections are always
3756          * added and removed with the TX task disabled. */
3757
3758         rcu_read_lock();
3759
3760         list_for_each_entry_rcu(c, &h->list, list) {
3761                 if (c->type != type || skb_queue_empty(&c->data_q))
3762                         continue;
3763
3764                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3765                         continue;
3766
3767                 num++;
3768
3769                 if (c->sent < min) {
3770                         min  = c->sent;
3771                         conn = c;
3772                 }
3773
3774                 if (hci_conn_num(hdev, type) == num)
3775                         break;
3776         }
3777
3778         rcu_read_unlock();
3779
3780         if (conn) {
3781                 int cnt, q;
3782
3783                 switch (conn->type) {
3784                 case ACL_LINK:
3785                         cnt = hdev->acl_cnt;
3786                         break;
3787                 case SCO_LINK:
3788                 case ESCO_LINK:
3789                         cnt = hdev->sco_cnt;
3790                         break;
3791                 case LE_LINK:
3792                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3793                         break;
3794                 default:
3795                         cnt = 0;
3796                         bt_dev_err(hdev, "unknown link type %d", conn->type);
3797                 }
3798
3799                 q = cnt / num;
3800                 *quote = q ? q : 1;
3801         } else
3802                 *quote = 0;
3803
3804         BT_DBG("conn %p quote %d", conn, *quote);
3805         return conn;
3806 }
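/* Worked example of the fair-share arithmetic above: with cnt = 8 free
 * ACL slots and num = 3 busy connections, the least-used connection is
 * picked and given a quote of 8 / 3 = 2 packets; a zero quotient is
 * rounded up so a connection is never starved of its single packet.
 */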
3807
3808 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3809 {
3810         struct hci_conn_hash *h = &hdev->conn_hash;
3811         struct hci_conn *c;
3812
3813         bt_dev_err(hdev, "link tx timeout");
3814
3815         rcu_read_lock();
3816
3817         /* Kill stalled connections */
3818         list_for_each_entry_rcu(c, &h->list, list) {
3819                 if (c->type == type && c->sent) {
3820                         bt_dev_err(hdev, "killing stalled connection %pMR",
3821                                    &c->dst);
3822                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3823                 }
3824         }
3825
3826         rcu_read_unlock();
3827 }
3828
3829 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3830                                       int *quote)
3831 {
3832         struct hci_conn_hash *h = &hdev->conn_hash;
3833         struct hci_chan *chan = NULL;
3834         unsigned int num = 0, min = ~0, cur_prio = 0;
3835         struct hci_conn *conn;
3836         int cnt, q, conn_num = 0;
3837
3838         BT_DBG("%s", hdev->name);
3839
3840         rcu_read_lock();
3841
3842         list_for_each_entry_rcu(conn, &h->list, list) {
3843                 struct hci_chan *tmp;
3844
3845                 if (conn->type != type)
3846                         continue;
3847
3848                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3849                         continue;
3850
3851                 conn_num++;
3852
3853                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3854                         struct sk_buff *skb;
3855
3856                         if (skb_queue_empty(&tmp->data_q))
3857                                 continue;
3858
3859                         skb = skb_peek(&tmp->data_q);
3860                         if (skb->priority < cur_prio)
3861                                 continue;
3862
3863                         if (skb->priority > cur_prio) {
3864                                 num = 0;
3865                                 min = ~0;
3866                                 cur_prio = skb->priority;
3867                         }
3868
3869                         num++;
3870
3871                         if (conn->sent < min) {
3872                                 min  = conn->sent;
3873                                 chan = tmp;
3874                         }
3875                 }
3876
3877                 if (hci_conn_num(hdev, type) == conn_num)
3878                         break;
3879         }
3880
3881         rcu_read_unlock();
3882
3883         if (!chan)
3884                 return NULL;
3885
3886         switch (chan->conn->type) {
3887         case ACL_LINK:
3888                 cnt = hdev->acl_cnt;
3889                 break;
3890         case AMP_LINK:
3891                 cnt = hdev->block_cnt;
3892                 break;
3893         case SCO_LINK:
3894         case ESCO_LINK:
3895                 cnt = hdev->sco_cnt;
3896                 break;
3897         case LE_LINK:
3898                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3899                 break;
3900         default:
3901                 cnt = 0;
3902                 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3903         }
3904
3905         q = cnt / num;
3906         *quote = q ? q : 1;
3907         BT_DBG("chan %p quote %d", chan, *quote);
3908         return chan;
3909 }
3910
3911 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3912 {
3913         struct hci_conn_hash *h = &hdev->conn_hash;
3914         struct hci_conn *conn;
3915         int num = 0;
3916
3917         BT_DBG("%s", hdev->name);
3918
3919         rcu_read_lock();
3920
3921         list_for_each_entry_rcu(conn, &h->list, list) {
3922                 struct hci_chan *chan;
3923
3924                 if (conn->type != type)
3925                         continue;
3926
3927                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3928                         continue;
3929
3930                 num++;
3931
3932                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3933                         struct sk_buff *skb;
3934
3935                         if (chan->sent) {
3936                                 chan->sent = 0;
3937                                 continue;
3938                         }
3939
3940                         if (skb_queue_empty(&chan->data_q))
3941                                 continue;
3942
3943                         skb = skb_peek(&chan->data_q);
3944                         if (skb->priority >= HCI_PRIO_MAX - 1)
3945                                 continue;
3946
3947                         skb->priority = HCI_PRIO_MAX - 1;
3948
3949                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3950                                skb->priority);
3951                 }
3952
3953                 if (hci_conn_num(hdev, type) == num)
3954                         break;
3955         }
3956
3957         rcu_read_unlock();
3959 }
3960
3961 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3962 {
3963         /* Calculate count of blocks used by this packet */
3964         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3965 }
3966
3967 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3968 {
3969         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3970                 /* ACL tx timeout must be longer than maximum
3971                  * link supervision timeout (40.9 seconds) */
3972                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3973                                        HCI_ACL_TX_TIMEOUT))
3974                         hci_link_tx_to(hdev, ACL_LINK);
3975         }
3976 }
3977
3978 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3979 {
3980         unsigned int cnt = hdev->acl_cnt;
3981         struct hci_chan *chan;
3982         struct sk_buff *skb;
3983         int quote;
3984
3985         __check_timeout(hdev, cnt);
3986
3987         while (hdev->acl_cnt &&
3988                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3989                 u32 priority = (skb_peek(&chan->data_q))->priority;
3990                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3991                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3992                                skb->len, skb->priority);
3993
3994                         /* Stop if priority has changed */
3995                         if (skb->priority < priority)
3996                                 break;
3997
3998                         skb = skb_dequeue(&chan->data_q);
3999
4000                         hci_conn_enter_active_mode(chan->conn,
4001                                                    bt_cb(skb)->force_active);
4002
4003                         hci_send_frame(hdev, skb);
4004                         hdev->acl_last_tx = jiffies;
4005
4006                         hdev->acl_cnt--;
4007                         chan->sent++;
4008                         chan->conn->sent++;
4009                 }
4010         }
4011
4012         if (cnt != hdev->acl_cnt)
4013                 hci_prio_recalculate(hdev, ACL_LINK);
4014 }
4015
4016 static void hci_sched_acl_blk(struct hci_dev *hdev)
4017 {
4018         unsigned int cnt = hdev->block_cnt;
4019         struct hci_chan *chan;
4020         struct sk_buff *skb;
4021         int quote;
4022         u8 type;
4023
4024         __check_timeout(hdev, cnt);
4025
4026         BT_DBG("%s", hdev->name);
4027
4028         if (hdev->dev_type == HCI_AMP)
4029                 type = AMP_LINK;
4030         else
4031                 type = ACL_LINK;
4032
4033         while (hdev->block_cnt > 0 &&
4034                (chan = hci_chan_sent(hdev, type, &quote))) {
4035                 u32 priority = (skb_peek(&chan->data_q))->priority;
4036                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4037                         int blocks;
4038
4039                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4040                                skb->len, skb->priority);
4041
4042                         /* Stop if priority has changed */
4043                         if (skb->priority < priority)
4044                                 break;
4045
4046                         skb = skb_dequeue(&chan->data_q);
4047
4048                         blocks = __get_blocks(hdev, skb);
4049                         if (blocks > hdev->block_cnt)
4050                                 return;
4051
4052                         hci_conn_enter_active_mode(chan->conn,
4053                                                    bt_cb(skb)->force_active);
4054
4055                         hci_send_frame(hdev, skb);
4056                         hdev->acl_last_tx = jiffies;
4057
4058                         hdev->block_cnt -= blocks;
4059                         quote -= blocks;
4060
4061                         chan->sent += blocks;
4062                         chan->conn->sent += blocks;
4063                 }
4064         }
4065
4066         if (cnt != hdev->block_cnt)
4067                 hci_prio_recalculate(hdev, type);
4068 }
4069
4070 static void hci_sched_acl(struct hci_dev *hdev)
4071 {
4072         BT_DBG("%s", hdev->name);
4073
4074         /* No ACL link over BR/EDR controller */
4075         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4076                 return;
4077
4078         /* No AMP link over AMP controller */
4079         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4080                 return;
4081
4082         switch (hdev->flow_ctl_mode) {
4083         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4084                 hci_sched_acl_pkt(hdev);
4085                 break;
4086
4087         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4088                 hci_sched_acl_blk(hdev);
4089                 break;
4090         }
4091 }
4092
4093 /* Schedule SCO */
4094 static void hci_sched_sco(struct hci_dev *hdev)
4095 {
4096         struct hci_conn *conn;
4097         struct sk_buff *skb;
4098         int quote;
4099
4100         BT_DBG("%s", hdev->name);
4101
4102         if (!hci_conn_num(hdev, SCO_LINK))
4103                 return;
4104
4105         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4106                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4107                         BT_DBG("skb %p len %d", skb, skb->len);
4108                         hci_send_frame(hdev, skb);
4109
4110                         conn->sent++;
4111                         if (conn->sent == ~0)
4112                                 conn->sent = 0;
4113                 }
4114         }
4115 }
4116
4117 static void hci_sched_esco(struct hci_dev *hdev)
4118 {
4119         struct hci_conn *conn;
4120         struct sk_buff *skb;
4121         int quote;
4122
4123         BT_DBG("%s", hdev->name);
4124
4125         if (!hci_conn_num(hdev, ESCO_LINK))
4126                 return;
4127
4128         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4129                                                      &quote))) {
4130                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4131                         BT_DBG("skb %p len %d", skb, skb->len);
4132                         hci_send_frame(hdev, skb);
4133
4134                         conn->sent++;
4135                         if (conn->sent == ~0)
4136                                 conn->sent = 0;
4137                 }
4138         }
4139 }
4140
4141 static void hci_sched_le(struct hci_dev *hdev)
4142 {
4143         struct hci_chan *chan;
4144         struct sk_buff *skb;
4145         int quote, cnt, tmp;
4146
4147         BT_DBG("%s", hdev->name);
4148
4149         if (!hci_conn_num(hdev, LE_LINK))
4150                 return;
4151
4152         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4153                 /* LE tx timeout must be longer than maximum
4154                  * link supervision timeout (40.9 seconds) */
4155                 if (!hdev->le_cnt && hdev->le_pkts &&
4156                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4157                         hci_link_tx_to(hdev, LE_LINK);
4158         }
4159
4160         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4161         tmp = cnt;
4162         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4163                 u32 priority = (skb_peek(&chan->data_q))->priority;
4164                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4165                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4166                                skb->len, skb->priority);
4167
4168                         /* Stop if priority has changed */
4169                         if (skb->priority < priority)
4170                                 break;
4171
4172                         skb = skb_dequeue(&chan->data_q);
4173
4174                         hci_send_frame(hdev, skb);
4175                         hdev->le_last_tx = jiffies;
4176
4177                         cnt--;
4178                         chan->sent++;
4179                         chan->conn->sent++;
4180                 }
4181         }
4182
4183         if (hdev->le_pkts)
4184                 hdev->le_cnt = cnt;
4185         else
4186                 hdev->acl_cnt = cnt;
4187
4188         if (cnt != tmp)
4189                 hci_prio_recalculate(hdev, LE_LINK);
4190 }
4191
4192 static void hci_tx_work(struct work_struct *work)
4193 {
4194         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4195         struct sk_buff *skb;
4196
4197         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4198                hdev->sco_cnt, hdev->le_cnt);
4199
4200         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4201                 /* Schedule queues and send stuff to HCI driver */
4202                 hci_sched_acl(hdev);
4203                 hci_sched_sco(hdev);
4204                 hci_sched_esco(hdev);
4205                 hci_sched_le(hdev);
4206         }
4207
4208         /* Send next queued raw (unknown type) packet */
4209         while ((skb = skb_dequeue(&hdev->raw_q)))
4210                 hci_send_frame(hdev, skb);
4211 }
4212
4213 /* ----- HCI RX task (incoming data processing) ----- */
4214
4215 /* ACL data packet */
4216 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4217 {
4218         struct hci_acl_hdr *hdr = (void *) skb->data;
4219         struct hci_conn *conn;
4220         __u16 handle, flags;
4221
4222         skb_pull(skb, HCI_ACL_HDR_SIZE);
4223
4224         handle = __le16_to_cpu(hdr->handle);
4225         flags  = hci_flags(handle);
4226         handle = hci_handle(handle);
4227
4228         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4229                handle, flags);
4230
4231         hdev->stat.acl_rx++;
4232
4233         hci_dev_lock(hdev);
4234         conn = hci_conn_hash_lookup_handle(hdev, handle);
4235         hci_dev_unlock(hdev);
4236
4237         if (conn) {
4238                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4239
4240                 /* Send to upper protocol */
4241                 l2cap_recv_acldata(conn, skb, flags);
4242                 return;
4243         } else {
4244                 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4245                            handle);
4246         }
4247
4248         kfree_skb(skb);
4249 }
4250
4251 /* SCO data packet */
4252 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4253 {
4254         struct hci_sco_hdr *hdr = (void *) skb->data;
4255         struct hci_conn *conn;
4256         __u16 handle;
4257
4258         skb_pull(skb, HCI_SCO_HDR_SIZE);
4259
4260         handle = __le16_to_cpu(hdr->handle);
4261
4262         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4263
4264         hdev->stat.sco_rx++;
4265
4266         hci_dev_lock(hdev);
4267         conn = hci_conn_hash_lookup_handle(hdev, handle);
4268         hci_dev_unlock(hdev);
4269
4270         if (conn) {
4271                 /* Send to upper protocol */
4272                 sco_recv_scodata(conn, skb);
4273                 return;
4274         } else {
4275                 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4276                            handle);
4277         }
4278
4279         kfree_skb(skb);
4280 }
4281
4282 static bool hci_req_is_complete(struct hci_dev *hdev)
4283 {
4284         struct sk_buff *skb;
4285
4286         skb = skb_peek(&hdev->cmd_q);
4287         if (!skb)
4288                 return true;
4289
4290         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4291 }
4292
4293 static void hci_resend_last(struct hci_dev *hdev)
4294 {
4295         struct hci_command_hdr *sent;
4296         struct sk_buff *skb;
4297         u16 opcode;
4298
4299         if (!hdev->sent_cmd)
4300                 return;
4301
4302         sent = (void *) hdev->sent_cmd->data;
4303         opcode = __le16_to_cpu(sent->opcode);
4304         if (opcode == HCI_OP_RESET)
4305                 return;
4306
4307         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4308         if (!skb)
4309                 return;
4310
4311         skb_queue_head(&hdev->cmd_q, skb);
4312         queue_work(hdev->workqueue, &hdev->cmd_work);
4313 }
4314
4315 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4316                           hci_req_complete_t *req_complete,
4317                           hci_req_complete_skb_t *req_complete_skb)
4318 {
4319         struct sk_buff *skb;
4320         unsigned long flags;
4321
4322         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4323
4324         /* If the completed command doesn't match the last one that was
4325          * sent, we need to do special handling of it.
4326          */
4327         if (!hci_sent_cmd_data(hdev, opcode)) {
4328                 /* Some CSR based controllers generate a spontaneous
4329                  * reset complete event during init and any pending
4330                  * command will never be completed. In such a case we
4331                  * need to resend whatever was the last sent
4332                  * command.
4333                  */
4334                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4335                         hci_resend_last(hdev);
4336
4337                 return;
4338         }
4339
4340         /* If the command succeeded and there are still more commands in
4341          * this request, the request is not yet complete.
4342          */
4343         if (!status && !hci_req_is_complete(hdev))
4344                 return;
4345
4346         /* If this was the last command in a request, the complete
4347          * callback would be found in hdev->sent_cmd instead of the
4348          * command queue (hdev->cmd_q).
4349          */
4350         if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4351                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4352                 return;
4353         }
4354
4355         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4356                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4357                 return;
4358         }
4359
4360         /* Remove all pending commands belonging to this request */
4361         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4362         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4363                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4364                         __skb_queue_head(&hdev->cmd_q, skb);
4365                         break;
4366                 }
4367
4368                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4369                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4370                 else
4371                         *req_complete = bt_cb(skb)->hci.req_complete;
4372                 kfree_skb(skb);
4373         }
4374         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4375 }
4376
4377 static void hci_rx_work(struct work_struct *work)
4378 {
4379         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4380         struct sk_buff *skb;
4381
4382         BT_DBG("%s", hdev->name);
4383
4384         while ((skb = skb_dequeue(&hdev->rx_q))) {
4385                 /* Send copy to monitor */
4386                 hci_send_to_monitor(hdev, skb);
4387
4388                 if (atomic_read(&hdev->promisc)) {
4389                         /* Send copy to the sockets */
4390                         hci_send_to_sock(hdev, skb);
4391                 }
4392
4393                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4394                         kfree_skb(skb);
4395                         continue;
4396                 }
4397
4398                 if (test_bit(HCI_INIT, &hdev->flags)) {
4399                         /* Don't process data packets in this state. */
4400                         switch (hci_skb_pkt_type(skb)) {
4401                         case HCI_ACLDATA_PKT:
4402                         case HCI_SCODATA_PKT:
4403                                 kfree_skb(skb);
4404                                 continue;
4405                         }
4406                 }
4407
4408                 /* Process frame */
4409                 switch (hci_skb_pkt_type(skb)) {
4410                 case HCI_EVENT_PKT:
4411                         BT_DBG("%s Event packet", hdev->name);
4412                         hci_event_packet(hdev, skb);
4413                         break;
4414
4415                 case HCI_ACLDATA_PKT:
4416                         BT_DBG("%s ACL data packet", hdev->name);
4417                         hci_acldata_packet(hdev, skb);
4418                         break;
4419
4420                 case HCI_SCODATA_PKT:
4421                         BT_DBG("%s SCO data packet", hdev->name);
4422                         hci_scodata_packet(hdev, skb);
4423                         break;
4424
4425                 default:
4426                         kfree_skb(skb);
4427                         break;
4428                 }
4429         }
4430 }
4431
4432 static void hci_cmd_work(struct work_struct *work)
4433 {
4434         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4435         struct sk_buff *skb;
4436
4437         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4438                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4439
4440         /* Send queued commands */
4441         if (atomic_read(&hdev->cmd_cnt)) {
4442                 skb = skb_dequeue(&hdev->cmd_q);
4443                 if (!skb)
4444                         return;
4445
4446                 kfree_skb(hdev->sent_cmd);
4447
4448                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4449                 if (hdev->sent_cmd) {
4450                         atomic_dec(&hdev->cmd_cnt);
4451                         hci_send_frame(hdev, skb);
4452                         if (test_bit(HCI_RESET, &hdev->flags))
4453                                 cancel_delayed_work(&hdev->cmd_timer);
4454                         else
4455                                 schedule_delayed_work(&hdev->cmd_timer,
4456                                                       HCI_CMD_TIMEOUT);
4457                 } else {
4458                         skb_queue_head(&hdev->cmd_q, skb);
4459                         queue_work(hdev->workqueue, &hdev->cmd_work);
4460                 }
4461         }
4462 }