net/bluetooth/hci_conn.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37
38 struct sco_param {
39         u16 pkt_type;        /* SCO/eSCO packet type bits */
40         u16 max_latency;     /* maximum latency, in milliseconds */
41         u8  retrans_effort;  /* retransmission effort */
42 };
43
44 static const struct sco_param esco_param_cvsd[] = {
45         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
46         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
47         { EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
48         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
49         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
50 };
51
52 static const struct sco_param sco_param_cvsd[] = {
53         { EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
54         { EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
55 };
56
57 static const struct sco_param esco_param_msbc[] = {
58         { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
59         { EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
60 };
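/* hci_setup_sync() below walks these tables using conn->attempt as an index
 * (attempt 1 selects the first entry), falling back to the next parameter
 * set on every failed attempt and giving up once the table is exhausted.
 */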
61
62 /* This function requires the caller holds hdev->lock */
63 static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
64 {
65         struct hci_conn_params *params;
66         struct hci_dev *hdev = conn->hdev;
67         struct smp_irk *irk;
68         bdaddr_t *bdaddr;
69         u8 bdaddr_type;
70
71         bdaddr = &conn->dst;
72         bdaddr_type = conn->dst_type;
73
74         /* Check if we need to convert to identity address */
75         irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
76         if (irk) {
77                 bdaddr = &irk->bdaddr;
78                 bdaddr_type = irk->addr_type;
79         }
80
81         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
82                                            bdaddr_type);
83         if (!params || !params->explicit_connect)
84                 return;
85
86         /* The connection attempt was doing a scan for a new RPA and is
87          * still in the scan phase. If the params are not associated with any
88          * other autoconnect action, remove them completely. If they are, just
89          * unmark them as waiting for connection by clearing explicit_connect.
90          */
91         params->explicit_connect = false;
92
93         list_del_init(&params->action);
94
95         switch (params->auto_connect) {
96         case HCI_AUTO_CONN_EXPLICIT:
97                 hci_conn_params_del(hdev, bdaddr, bdaddr_type);
98                 /* return instead of break to avoid duplicate scan update */
99                 return;
100         case HCI_AUTO_CONN_DIRECT:
101         case HCI_AUTO_CONN_ALWAYS:
102                 list_add(&params->action, &hdev->pend_le_conns);
103                 break;
104         case HCI_AUTO_CONN_REPORT:
105                 list_add(&params->action, &hdev->pend_le_reports);
106                 break;
107         default:
108                 break;
109         }
110
111         hci_update_background_scan(hdev);
112 }
113
114 static void hci_conn_cleanup(struct hci_conn *conn)
115 {
116         struct hci_dev *hdev = conn->hdev;
117
118         if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
119                 hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);
120
121         hci_chan_list_flush(conn);
122
123         hci_conn_hash_del(hdev, conn);
124
125         if (hdev->notify)
126                 hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
127
128         hci_conn_del_sysfs(conn);
129
130         debugfs_remove_recursive(conn->debugfs);
131
132         hci_dev_put(hdev);
133
134         hci_conn_put(conn);
135 }
136
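/* Deferred cleanup for an LE connection that was still in the scanning
 * state. Scheduled from hci_connect_le_scan_remove(), which took extra
 * references on the hci_dev and hci_conn; both are dropped here.
 */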
137 static void le_scan_cleanup(struct work_struct *work)
138 {
139         struct hci_conn *conn = container_of(work, struct hci_conn,
140                                              le_scan_cleanup);
141         struct hci_dev *hdev = conn->hdev;
142         struct hci_conn *c = NULL;
143
144         BT_DBG("%s hcon %pK", hdev->name, conn);
145
146         hci_dev_lock(hdev);
147
148         /* Check that the hci_conn is still around */
149         rcu_read_lock();
150         list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
151                 if (c == conn)
152                         break;
153         }
154         rcu_read_unlock();
155
156         if (c == conn) {
157                 hci_connect_le_scan_cleanup(conn);
158                 hci_conn_cleanup(conn);
159         }
160
161         hci_dev_unlock(hdev);
162         hci_dev_put(hdev);
163         hci_conn_put(conn);
164 }
165
166 static void hci_connect_le_scan_remove(struct hci_conn *conn)
167 {
168         BT_DBG("%s hcon %pK", conn->hdev->name, conn);
169
170         /* We can't call hci_conn_del/hci_conn_cleanup here since that
171          * could deadlock with another hci_conn_del() call that's holding
172          * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
173          * Instead, grab temporary extra references to the hci_dev and
174          * hci_conn and perform the necessary cleanup in a separate work
175          * callback.
176          */
177
178         hci_dev_hold(conn->hdev);
179         hci_conn_get(conn);
180
181         schedule_work(&conn->le_scan_cleanup);
182 }
183
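/* Issue HCI_Create_Connection for an outgoing BR/EDR link, seeding page
 * scan mode and clock offset from the inquiry cache when a fresh entry
 * for the peer is available.
 */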
184 static void hci_acl_create_connection(struct hci_conn *conn)
185 {
186         struct hci_dev *hdev = conn->hdev;
187         struct inquiry_entry *ie;
188         struct hci_cp_create_conn cp;
189
190         BT_DBG("hcon %pK", conn);
191
192         conn->state = BT_CONNECT;
193         conn->out = true;
194         conn->role = HCI_ROLE_MASTER;
195
196         conn->attempt++;
197
198         conn->link_policy = hdev->link_policy;
199
200         memset(&cp, 0, sizeof(cp));
201         bacpy(&cp.bdaddr, &conn->dst);
202         cp.pscan_rep_mode = 0x02;
203
204         ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
205         if (ie) {
206                 if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
207                         cp.pscan_rep_mode = ie->data.pscan_rep_mode;
208                         cp.pscan_mode     = ie->data.pscan_mode;
209                         cp.clock_offset   = ie->data.clock_offset |
210                                             cpu_to_le16(0x8000);
211                 }
212
213                 memcpy(conn->dev_class, ie->data.dev_class, 3);
214                 if (ie->data.ssp_mode > 0)
215                         set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
216         }
217
218         cp.pkt_type = cpu_to_le16(conn->pkt_type);
219         if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
220                 cp.role_switch = 0x01;
221         else
222                 cp.role_switch = 0x00;
223
224         hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
225 }
226
227 int hci_disconnect(struct hci_conn *conn, __u8 reason)
228 {
229         BT_DBG("hcon %pK", conn);
230
231         /* When we are master of an established connection that enters
232          * the disconnect timeout, go ahead and try to read the current
233          * clock offset. Processing of the result is done within the
234          * event handling in the hci_clock_offset_evt function.
235          */
236         if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
237             (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
238                 struct hci_dev *hdev = conn->hdev;
239                 struct hci_cp_read_clock_offset clkoff_cp;
240
241                 clkoff_cp.handle = cpu_to_le16(conn->handle);
242                 hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
243                              &clkoff_cp);
244         }
245
246         return hci_abort_conn(conn, reason);
247 }
248
249 static void hci_add_sco(struct hci_conn *conn, __u16 handle)
250 {
251         struct hci_dev *hdev = conn->hdev;
252         struct hci_cp_add_sco cp;
253
254         BT_DBG("hcon %pK", conn);
255
256         conn->state = BT_CONNECT;
257         conn->out = true;
258
259         conn->attempt++;
260
261         cp.handle   = cpu_to_le16(handle);
262         cp.pkt_type = cpu_to_le16(conn->pkt_type);
263
264         hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
265 }
266
267 bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
268 {
269         struct hci_dev *hdev = conn->hdev;
270         struct hci_cp_setup_sync_conn cp;
271         const struct sco_param *param;
272
273         BT_DBG("hcon %pK", conn);
274
275         conn->state = BT_CONNECT;
276         conn->out = true;
277
278         conn->attempt++;
279
280         cp.handle   = cpu_to_le16(handle);
281
282         cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
283         cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
284         cp.voice_setting  = cpu_to_le16(conn->setting);
285
286         switch (conn->setting & SCO_AIRMODE_MASK) {
287         case SCO_AIRMODE_TRANSP:
288                 if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
289                         return false;
290                 param = &esco_param_msbc[conn->attempt - 1];
291                 break;
292         case SCO_AIRMODE_CVSD:
293                 if (lmp_esco_capable(conn->link)) {
294                         if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
295                                 return false;
296                         param = &esco_param_cvsd[conn->attempt - 1];
297                 } else {
298                         if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
299                                 return false;
300                         param = &sco_param_cvsd[conn->attempt - 1];
301                 }
302                 break;
303         default:
304                 return false;
305         }
306
307         cp.retrans_effort = param->retrans_effort;
308         cp.pkt_type = __cpu_to_le16(param->pkt_type);
309         cp.max_latency = __cpu_to_le16(param->max_latency);
310
311         if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
312                 return false;
313
314         return true;
315 }
316
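/* Update any stored connection parameters and send HCI_LE_Connection_Update
 * for this connection. Returns 0x01 when matching hci_conn_params were
 * found and updated, 0x00 otherwise.
 */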
317 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
318                       u16 to_multiplier)
319 {
320         struct hci_dev *hdev = conn->hdev;
321         struct hci_conn_params *params;
322         struct hci_cp_le_conn_update cp;
323
324         hci_dev_lock(hdev);
325
326         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
327         if (params) {
328                 params->conn_min_interval = min;
329                 params->conn_max_interval = max;
330                 params->conn_latency = latency;
331                 params->supervision_timeout = to_multiplier;
332         }
333
334         hci_dev_unlock(hdev);
335
336         memset(&cp, 0, sizeof(cp));
337         cp.handle               = cpu_to_le16(conn->handle);
338         cp.conn_interval_min    = cpu_to_le16(min);
339         cp.conn_interval_max    = cpu_to_le16(max);
340         cp.conn_latency         = cpu_to_le16(latency);
341         cp.supervision_timeout  = cpu_to_le16(to_multiplier);
342         cp.min_ce_len           = cpu_to_le16(0x0000);
343         cp.max_ce_len           = cpu_to_le16(0x0000);
344
345         hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
346
347         if (params)
348                 return 0x01;
349
350         return 0x00;
351 }
352
353 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
354                       __u8 ltk[16], __u8 key_size)
355 {
356         struct hci_dev *hdev = conn->hdev;
357         struct hci_cp_le_start_enc cp;
358
359         BT_DBG("hcon %pK", conn);
360
361         memset(&cp, 0, sizeof(cp));
362
363         cp.handle = cpu_to_le16(conn->handle);
364         cp.rand = rand;
365         cp.ediv = ediv;
366         memcpy(cp.ltk, ltk, key_size);
367
368         hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
369 }
370
371 /* Device _must_ be locked */
372 void hci_sco_setup(struct hci_conn *conn, __u8 status)
373 {
374         struct hci_conn *sco = conn->link;
375
376         if (!sco)
377                 return;
378
379         BT_DBG("hcon %pK", conn);
380
381         if (!status) {
382                 if (lmp_esco_capable(conn->hdev))
383                         hci_setup_sync(sco, conn->handle);
384                 else
385                         hci_add_sco(sco, conn->handle);
386         } else {
387                 hci_connect_cfm(sco, status);
388                 hci_conn_del(sco);
389         }
390 }
391
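/* Disconnect timeout handler. Only acts once the connection refcnt has
 * dropped to zero. LE connections that are still in the scanning state are
 * handed to hci_connect_le_scan_remove(); everything else is aborted.
 */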
392 static void hci_conn_timeout(struct work_struct *work)
393 {
394         struct hci_conn *conn = container_of(work, struct hci_conn,
395                                              disc_work.work);
396         int refcnt = atomic_read(&conn->refcnt);
397
398         BT_DBG("hcon %pK state %s", conn, state_to_string(conn->state));
399
400         WARN_ON(refcnt < 0);
401
402         /* FIXME: It was observed that in a failed pairing scenario, refcnt
403          * drops below 0. Probably this is because l2cap_conn_del calls
404          * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
405          * dropped. After that loop hci_chan_del is called, which also drops
406          * conn. For now make sure that the ACL is kept alive if refcnt is
407          * higher than 0, otherwise drop it.
408          */
409         if (refcnt > 0)
410                 return;
411
412         /* LE connections in scanning state need special handling */
413         if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
414             test_bit(HCI_CONN_SCANNING, &conn->flags)) {
415                 hci_connect_le_scan_remove(conn);
416                 return;
417         }
418
419         hci_abort_conn(conn, hci_proto_disconn_ind(conn));
420 }
421
422 /* Enter sniff mode */
423 static void hci_conn_idle(struct work_struct *work)
424 {
425         struct hci_conn *conn = container_of(work, struct hci_conn,
426                                              idle_work.work);
427         struct hci_dev *hdev = conn->hdev;
428
429         BT_DBG("hcon %pK mode %d", conn, conn->mode);
430
431         if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
432                 return;
433
434         if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
435                 return;
436
437         if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
438                 struct hci_cp_sniff_subrate cp;
439                 cp.handle             = cpu_to_le16(conn->handle);
440                 cp.max_latency        = cpu_to_le16(0);
441                 cp.min_remote_timeout = cpu_to_le16(0);
442                 cp.min_local_timeout  = cpu_to_le16(0);
443                 hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
444         }
445
446         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
447                 struct hci_cp_sniff_mode cp;
448                 cp.handle       = cpu_to_le16(conn->handle);
449                 cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
450                 cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
451                 cp.attempt      = cpu_to_le16(4);
452                 cp.timeout      = cpu_to_le16(1);
453                 hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
454         }
455 }
456
457 static void hci_conn_auto_accept(struct work_struct *work)
458 {
459         struct hci_conn *conn = container_of(work, struct hci_conn,
460                                              auto_accept_work.work);
461
462         hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
463                      &conn->dst);
464 }
465
466 static void le_conn_timeout(struct work_struct *work)
467 {
468         struct hci_conn *conn = container_of(work, struct hci_conn,
469                                              le_conn_timeout.work);
470         struct hci_dev *hdev = conn->hdev;
471
472         BT_DBG("");
473
474         /* We could end up here due to having done directed advertising,
475          * so clean up the state if necessary. This should however only
476          * happen with broken hardware or if low duty cycle was used
477          * (which doesn't have a timeout of its own).
478          */
479         if (conn->role == HCI_ROLE_SLAVE) {
480                 u8 enable = 0x00;
481                 hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
482                              &enable);
483                 hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
484                 return;
485         }
486
487         hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
488 }
489
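/* Allocate and initialise a new hci_conn, pick default packet types based
 * on the link type, take a reference on the hci_dev and add the connection
 * to the connection hash. The returned connection starts with refcnt 0.
 */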
490 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
491                               u8 role)
492 {
493         struct hci_conn *conn;
494
495         BT_DBG("%s dst %pMR", hdev->name, dst);
496
497         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
498         if (!conn)
499                 return NULL;
500
501         bacpy(&conn->dst, dst);
502         bacpy(&conn->src, &hdev->bdaddr);
503         conn->hdev  = hdev;
504         conn->type  = type;
505         conn->role  = role;
506         conn->mode  = HCI_CM_ACTIVE;
507         conn->state = BT_OPEN;
508         conn->auth_type = HCI_AT_GENERAL_BONDING;
509         conn->io_capability = hdev->io_capability;
510         conn->remote_auth = 0xff;
511         conn->key_type = 0xff;
512         conn->rssi = HCI_RSSI_INVALID;
513         conn->tx_power = HCI_TX_POWER_INVALID;
514         conn->max_tx_power = HCI_TX_POWER_INVALID;
515
516         set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
517         conn->disc_timeout = HCI_DISCONN_TIMEOUT;
518
519         if (conn->role == HCI_ROLE_MASTER)
520                 conn->out = true;
521
522         switch (type) {
523         case ACL_LINK:
524                 conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
525                 break;
526         case LE_LINK:
527                 /* conn->src should reflect the local identity address */
528                 hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
529                 break;
530         case SCO_LINK:
531                 if (lmp_esco_capable(hdev))
532                         conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
533                                         (hdev->esco_type & EDR_ESCO_MASK);
534                 else
535                         conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
536                 break;
537         case ESCO_LINK:
538                 conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
539                 break;
540         }
541
542         skb_queue_head_init(&conn->data_q);
543
544         INIT_LIST_HEAD(&conn->chan_list);
545
546         INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
547         INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
548         INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
549         INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
550         INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
551
552         atomic_set(&conn->refcnt, 0);
553
554         hci_dev_hold(hdev);
555
556         hci_conn_hash_add(hdev, conn);
557         if (hdev->notify)
558                 hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
559
560         hci_conn_init_sysfs(conn);
561
562         return conn;
563 }
564
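/* Tear down a connection: cancel its pending work, return any unacked
 * packet credits to the controller counters, unlink an attached SCO or ACL
 * peer and finish with hci_conn_cleanup().
 */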
565 int hci_conn_del(struct hci_conn *conn)
566 {
567         struct hci_dev *hdev = conn->hdev;
568
569         BT_DBG("%s hcon %pK handle %d", hdev->name, conn, conn->handle);
570
571         cancel_delayed_work_sync(&conn->disc_work);
572         cancel_delayed_work_sync(&conn->auto_accept_work);
573         cancel_delayed_work_sync(&conn->idle_work);
574
575         if (conn->type == ACL_LINK) {
576                 struct hci_conn *sco = conn->link;
577                 if (sco)
578                         sco->link = NULL;
579
580                 /* Unacked frames */
581                 hdev->acl_cnt += conn->sent;
582         } else if (conn->type == LE_LINK) {
583                 cancel_delayed_work(&conn->le_conn_timeout);
584
585                 if (hdev->le_pkts)
586                         hdev->le_cnt += conn->sent;
587                 else
588                         hdev->acl_cnt += conn->sent;
589         } else {
590                 struct hci_conn *acl = conn->link;
591                 if (acl) {
592                         acl->link = NULL;
593                         hci_conn_drop(acl);
594                 }
595         }
596
597         if (conn->amp_mgr)
598                 amp_mgr_put(conn->amp_mgr);
599
600         skb_queue_purge(&conn->data_q);
601
602         /* Remove the connection from the list and cleanup its remaining
603          * state. This is a separate function since for some cases like
604          * BT_CONNECT_SCAN we *only* want the cleanup part without the
605          * rest of hci_conn_del.
606          */
607         hci_conn_cleanup(conn);
608
609         return 0;
610 }
611
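/* Pick the BR/EDR controller to use for a connection. With a specific
 * source address the controller with that bdaddr is chosen; otherwise the
 * first usable controller whose address differs from the destination is
 * used. A reference on the returned hci_dev is held for the caller.
 */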
612 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
613 {
614         int use_src = bacmp(src, BDADDR_ANY);
615         struct hci_dev *hdev = NULL, *d;
616
617         BT_DBG("%pMR -> %pMR", src, dst);
618
619         read_lock(&hci_dev_list_lock);
620
621         list_for_each_entry(d, &hci_dev_list, list) {
622                 if (!test_bit(HCI_UP, &d->flags) ||
623                     hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
624                     d->dev_type != HCI_BREDR)
625                         continue;
626
627                 /* Simple routing:
628                  *   No source address - find interface with bdaddr != dst
629                  *   Source address    - find interface with bdaddr == src
630                  */
631
632                 if (use_src) {
633                         if (!bacmp(&d->bdaddr, src)) {
634                                 hdev = d; break;
635                         }
636                 } else {
637                         if (bacmp(&d->bdaddr, dst)) {
638                                 hdev = d; break;
639                         }
640                 }
641         }
642
643         if (hdev)
644                 hdev = hci_dev_hold(hdev);
645
646         read_unlock(&hci_dev_list_lock);
647         return hdev;
648 }
649 EXPORT_SYMBOL(hci_get_route);
650
651 /* This function requires the caller holds hdev->lock */
652 void hci_le_conn_failed(struct hci_conn *conn, u8 status)
653 {
654         struct hci_dev *hdev = conn->hdev;
655         struct hci_conn_params *params;
656
657         params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
658                                            conn->dst_type);
659         if (params && params->conn) {
660                 hci_conn_drop(params->conn);
661                 hci_conn_put(params->conn);
662                 params->conn = NULL;
663         }
664
665         conn->state = BT_CLOSED;
666
667         mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
668                             status);
669
670         hci_connect_cfm(conn, status);
671
672         hci_conn_del(conn);
673
674         /* Since we may have temporarily stopped the background scanning in
675          * favor of connection establishment, we should restart it.
676          */
677         hci_update_background_scan(hdev);
678
679         /* Re-enable advertising in case this was a failed connection
680          * attempt as a peripheral.
681          */
682         mgmt_reenable_advertising(hdev);
683 }
684
685 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
686 {
687         struct hci_conn *conn;
688
689         hci_dev_lock(hdev);
690
691         conn = hci_lookup_le_connect(hdev);
692
693         if (!status) {
694                 hci_connect_le_scan_cleanup(conn);
695                 goto done;
696         }
697
698         BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
699                status);
700
701         if (!conn)
702                 goto done;
703
704         hci_le_conn_failed(conn, status);
705
706 done:
707         hci_dev_unlock(hdev);
708 }
709
710 static void hci_req_add_le_create_conn(struct hci_request *req,
711                                        struct hci_conn *conn,
712                                        bdaddr_t *direct_rpa)
713 {
714         struct hci_cp_le_create_conn cp;
715         struct hci_dev *hdev = conn->hdev;
716         u8 own_addr_type;
717
718         memset(&cp, 0, sizeof(cp));
719
720         /* If direct address was provided we use it instead of current
721          * address.
722          */
723         if (direct_rpa) {
724                 if (bacmp(&req->hdev->random_addr, direct_rpa))
725                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
726                                                                 direct_rpa);
727
728                 /* direct address is always RPA */
729                 own_addr_type = ADDR_LE_DEV_RANDOM;
730         } else {
731                 /* Update random address, but set require_privacy to false so
732          * that we never connect with a non-resolvable address.
733                  */
734                 if (hci_update_random_address(req, false, &own_addr_type))
735                         return;
736         }
737
738         /* Set window to be the same value as the interval to enable
739          * continuous scanning.
740          */
741         cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
742         cp.scan_window = cp.scan_interval;
743
744         bacpy(&cp.peer_addr, &conn->dst);
745         cp.peer_addr_type = conn->dst_type;
746         cp.own_address_type = own_addr_type;
747         cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
748         cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
749         cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
750         cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
751         cp.min_ce_len = cpu_to_le16(0x0000);
752         cp.max_ce_len = cpu_to_le16(0x0000);
753
754         hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
755
756         conn->state = BT_CONNECT;
757         clear_bit(HCI_CONN_SCANNING, &conn->flags);
758 }
759
760 static void hci_req_directed_advertising(struct hci_request *req,
761                                          struct hci_conn *conn)
762 {
763         struct hci_dev *hdev = req->hdev;
764         struct hci_cp_le_set_adv_param cp;
765         u8 own_addr_type;
766         u8 enable;
767
768         /* Clear the HCI_LE_ADV bit temporarily so that the
769          * hci_update_random_address knows that it's safe to go ahead
770          * and write a new random address. The flag will be set back on
771          * as soon as the SET_ADV_ENABLE HCI command completes.
772          */
773         hci_dev_clear_flag(hdev, HCI_LE_ADV);
774
775         /* Set require_privacy to false so that the remote device has a
776          * chance of identifying us.
777          */
778         if (hci_update_random_address(req, false, &own_addr_type) < 0)
779                 return;
780
781         memset(&cp, 0, sizeof(cp));
782         cp.type = LE_ADV_DIRECT_IND;
783         cp.own_address_type = own_addr_type;
784         cp.direct_addr_type = conn->dst_type;
785         bacpy(&cp.direct_addr, &conn->dst);
786         cp.channel_map = hdev->le_adv_channel_map;
787
788         hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
789
790         enable = 0x01;
791         hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
792
793         conn->state = BT_CONNECT;
794 }
795
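/* Create an outgoing LE connection, or take over an unfinished attempt
 * started by hci_connect_le_scan(). Advertising and scanning are stopped
 * as needed, directed advertising is used for the slave role and the
 * actual HCI_LE_Create_Connection is queued for the master role.
 */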
796 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
797                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
798                                 u8 role, bdaddr_t *direct_rpa)
799 {
800         struct hci_conn_params *params;
801         struct hci_conn *conn, *conn_unfinished;
802         struct smp_irk *irk;
803         struct hci_request req;
804         int err;
805
806         /* Let's make sure that LE is enabled. */
807         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
808                 if (lmp_le_capable(hdev))
809                         return ERR_PTR(-ECONNREFUSED);
810
811                 return ERR_PTR(-EOPNOTSUPP);
812         }
813
814         /* Some devices send ATT messages as soon as the physical link is
815          * established. To be able to handle these ATT messages, the user-
816          * space first establishes the connection and then starts the pairing
817          * process.
818          *
819          * So if a hci_conn object already exists for the following connection
820          * attempt, we simply update pending_sec_level and auth_type fields
821          * and return the object found.
822          */
823         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
824         conn_unfinished = NULL;
825         if (conn) {
826                 if (conn->state == BT_CONNECT &&
827                     test_bit(HCI_CONN_SCANNING, &conn->flags)) {
828                         BT_DBG("will continue unfinished conn %pMR", dst);
829                         conn_unfinished = conn;
830                 } else {
831                         if (conn->pending_sec_level < sec_level)
832                                 conn->pending_sec_level = sec_level;
833                         goto done;
834                 }
835         }
836
837         /* Since the controller supports only one LE connection attempt at a
838          * time, we return -EBUSY if there is any connection attempt running.
839          */
840         if (hci_lookup_le_connect(hdev))
841                 return ERR_PTR(-EBUSY);
842
843         /* When given an identity address with existing identity
844          * resolving key, the connection needs to be established
845          * to a resolvable random address.
846          *
847          * Storing the resolvable random address is required here
848          * to handle connection failures. The address will later
849          * be resolved back into the original identity address
850          * from the connect request.
851          */
852         irk = hci_find_irk_by_addr(hdev, dst, dst_type);
853         if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
854                 dst = &irk->rpa;
855                 dst_type = ADDR_LE_DEV_RANDOM;
856         }
857
858         if (conn_unfinished) {
859                 conn = conn_unfinished;
860                 bacpy(&conn->dst, dst);
861         } else {
862                 conn = hci_conn_add(hdev, LE_LINK, dst, role);
863         }
864
865         if (!conn)
866                 return ERR_PTR(-ENOMEM);
867
868         conn->dst_type = dst_type;
869         conn->sec_level = BT_SECURITY_LOW;
870         conn->conn_timeout = conn_timeout;
871
872         if (!conn_unfinished)
873                 conn->pending_sec_level = sec_level;
874
875         hci_req_init(&req, hdev);
876
877         /* Disable advertising if we're active. For master role
878          * connections most controllers will refuse to connect if
879          * advertising is enabled, and for slave role connections we
880          * anyway have to disable it in order to start directed
881          * advertising.
882          */
883         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
884                 u8 enable = 0x00;
885                 hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
886                             &enable);
887         }
888
889         /* If requested to connect as slave use directed advertising */
890         if (conn->role == HCI_ROLE_SLAVE) {
891                 /* If we're active scanning most controllers are unable
892                  * to initiate advertising. Simply reject the attempt.
893                  */
894                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
895                     hdev->le_scan_type == LE_SCAN_ACTIVE) {
896                         skb_queue_purge(&req.cmd_q);
897                         hci_conn_del(conn);
898                         return ERR_PTR(-EBUSY);
899                 }
900
901                 hci_req_directed_advertising(&req, conn);
902                 goto create_conn;
903         }
904
905         params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
906         if (params) {
907                 conn->le_conn_min_interval = params->conn_min_interval;
908                 conn->le_conn_max_interval = params->conn_max_interval;
909                 conn->le_conn_latency = params->conn_latency;
910                 conn->le_supv_timeout = params->supervision_timeout;
911         } else {
912                 conn->le_conn_min_interval = hdev->le_conn_min_interval;
913                 conn->le_conn_max_interval = hdev->le_conn_max_interval;
914                 conn->le_conn_latency = hdev->le_conn_latency;
915                 conn->le_supv_timeout = hdev->le_supv_timeout;
916         }
917
918         /* If controller is scanning, we stop it since some controllers are
919          * not able to scan and connect at the same time. Also set the
920          * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
921          * handler for scan disabling knows to set the correct discovery
922          * state.
923          */
924         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
925                 hci_req_add_le_scan_disable(&req);
926                 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
927         }
928
929         hci_req_add_le_create_conn(&req, conn, direct_rpa);
930
931 create_conn:
932         err = hci_req_run(&req, create_le_conn_complete);
933         if (err) {
934                 hci_conn_del(conn);
935                 return ERR_PTR(err);
936         }
937
938 done:
939         /* If this is continuation of connect started by hci_connect_le_scan,
940          * it already called hci_conn_hold and calling it again would mess the
941          * counter.
942          */
943         if (!conn_unfinished)
944                 hci_conn_hold(conn);
945
946         return conn;
947 }
948
949 static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
950                                          u16 opcode)
951 {
952         struct hci_conn *conn;
953
954         if (!status)
955                 return;
956
957         BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
958                status);
959
960         hci_dev_lock(hdev);
961
962         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
963         if (conn)
964                 hci_le_conn_failed(conn, status);
965
966         hci_dev_unlock(hdev);
967 }
968
969 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
970 {
971         struct hci_conn *conn;
972
973         conn = hci_conn_hash_lookup_le(hdev, addr, type);
974         if (!conn)
975                 return false;
976
977         if (conn->state != BT_CONNECTED)
978                 return false;
979
980         return true;
981 }
982
983 /* This function requires the caller holds hdev->lock */
984 static int hci_explicit_conn_params_set(struct hci_request *req,
985                                         bdaddr_t *addr, u8 addr_type)
986 {
987         struct hci_dev *hdev = req->hdev;
988         struct hci_conn_params *params;
989
990         if (is_connected(hdev, addr, addr_type))
991                 return -EISCONN;
992
993         params = hci_conn_params_lookup(hdev, addr, addr_type);
994         if (!params) {
995                 params = hci_conn_params_add(hdev, addr, addr_type);
996                 if (!params)
997                         return -ENOMEM;
998
999                 /* If we created new params, mark them to be deleted in
1000                  * hci_connect_le_scan_cleanup. This is a different case from
1001                  * existing disabled params; those will stay after cleanup.
1002                  */
1003                 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
1004         }
1005
1006         /* We're trying to connect, so make sure params are at pend_le_conns */
1007         if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
1008             params->auto_connect == HCI_AUTO_CONN_REPORT ||
1009             params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
1010                 list_del_init(&params->action);
1011                 list_add(&params->action, &hdev->pend_le_conns);
1012         }
1013
1014         params->explicit_connect = true;
1015         __hci_update_background_scan(req);
1016
1017         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
1018                params->auto_connect);
1019
1020         return 0;
1021 }
1022
1023 /* This function requires the caller holds hdev->lock */
1024 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1025                                      u8 dst_type, u8 sec_level,
1026                                      u16 conn_timeout, u8 role)
1027 {
1028         struct hci_conn *conn;
1029         struct hci_request req;
1030         int err;
1031
1032         /* Let's make sure that LE is enabled. */
1033         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1034                 if (lmp_le_capable(hdev))
1035                         return ERR_PTR(-ECONNREFUSED);
1036
1037                 return ERR_PTR(-EOPNOTSUPP);
1038         }
1039
1040         /* Some devices send ATT messages as soon as the physical link is
1041          * established. To be able to handle these ATT messages, the user-
1042          * space first establishes the connection and then starts the pairing
1043          * process.
1044          *
1045          * So if a hci_conn object already exists for the following connection
1046          * attempt, we simply update pending_sec_level and auth_type fields
1047          * and return the object found.
1048          */
1049         conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
1050         if (conn) {
1051                 if (conn->pending_sec_level < sec_level)
1052                         conn->pending_sec_level = sec_level;
1053                 goto done;
1054         }
1055
1056         BT_DBG("requesting refresh of dst_addr");
1057
1058         conn = hci_conn_add(hdev, LE_LINK, dst, role);
1059         if (!conn)
1060                 return ERR_PTR(-ENOMEM);
1061
1062         hci_req_init(&req, hdev);
1063
1064         if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
1065                 return ERR_PTR(-EBUSY);
1066
1067         conn->state = BT_CONNECT;
1068         set_bit(HCI_CONN_SCANNING, &conn->flags);
1069
1070         err = hci_req_run(&req, hci_connect_le_scan_complete);
1071         if (err && err != -ENODATA) {
1072                 hci_conn_del(conn);
1073                 return ERR_PTR(err);
1074         }
1075
1076         conn->dst_type = dst_type;
1077         conn->sec_level = BT_SECURITY_LOW;
1078         conn->pending_sec_level = sec_level;
1079         conn->conn_timeout = conn_timeout;
1080
1081 done:
1082         hci_conn_hold(conn);
1083         return conn;
1084 }
1085
1086 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
1087                                  u8 sec_level, u8 auth_type)
1088 {
1089         struct hci_conn *acl;
1090
1091         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1092                 if (lmp_bredr_capable(hdev))
1093                         return ERR_PTR(-ECONNREFUSED);
1094
1095                 return ERR_PTR(-EOPNOTSUPP);
1096         }
1097
1098         acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
1099         if (!acl) {
1100                 acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
1101                 if (!acl)
1102                         return ERR_PTR(-ENOMEM);
1103         }
1104
1105         hci_conn_hold(acl);
1106
1107         if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
1108                 acl->sec_level = BT_SECURITY_LOW;
1109                 acl->pending_sec_level = sec_level;
1110                 acl->auth_type = auth_type;
1111                 hci_acl_create_connection(acl);
1112         }
1113
1114         return acl;
1115 }
1116
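/* Set up (or reuse) the ACL link to the peer first, then create and link
 * the SCO/eSCO hci_conn. If a mode change is still pending on the ACL, the
 * synchronous setup is deferred via HCI_CONN_SCO_SETUP_PEND until the mode
 * change has completed.
 */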
1117 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
1118                                  __u16 setting)
1119 {
1120         struct hci_conn *acl;
1121         struct hci_conn *sco;
1122
1123         acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
1124         if (IS_ERR(acl))
1125                 return acl;
1126
1127         sco = hci_conn_hash_lookup_ba(hdev, type, dst);
1128         if (!sco) {
1129                 sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
1130                 if (!sco) {
1131                         hci_conn_drop(acl);
1132                         return ERR_PTR(-ENOMEM);
1133                 }
1134         }
1135
1136         acl->link = sco;
1137         sco->link = acl;
1138
1139         hci_conn_hold(sco);
1140
1141         sco->setting = setting;
1142
1143         if (acl->state == BT_CONNECTED &&
1144             (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
1145                 set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
1146                 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
1147
1148                 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
1149                         /* defer SCO setup until mode change completed */
1150                         set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
1151                         return sco;
1152                 }
1153
1154                 hci_sco_setup(acl, 0x00);
1155         }
1156
1157         return sco;
1158 }
1159
1160 /* Check link security requirement */
1161 int hci_conn_check_link_mode(struct hci_conn *conn)
1162 {
1163         BT_DBG("hcon %pK", conn);
1164
1165         /* In Secure Connections Only mode, it is required that Secure
1166          * Connections is used and the link is encrypted with AES-CCM
1167          * using a P-256 authenticated combination key.
1168          */
1169         if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1170                 if (!hci_conn_sc_enabled(conn) ||
1171                     !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1172                     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1173                         return 0;
1174         }
1175
1176         if (hci_conn_ssp_enabled(conn) &&
1177             !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1178                 return 0;
1179
1180         return 1;
1181 }
1182
1183 /* Authenticate remote device */
1184 static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
1185 {
1186         BT_DBG("hcon %pK", conn);
1187
1188         if (conn->pending_sec_level > sec_level)
1189                 sec_level = conn->pending_sec_level;
1190
1191         if (sec_level > conn->sec_level)
1192                 conn->pending_sec_level = sec_level;
1193         else if (test_bit(HCI_CONN_AUTH, &conn->flags))
1194                 return 1;
1195
1196         /* Make sure we preserve an existing MITM requirement */
1197         auth_type |= (conn->auth_type & 0x01);
1198
1199         conn->auth_type = auth_type;
1200
1201         if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1202                 struct hci_cp_auth_requested cp;
1203
1204                 cp.handle = cpu_to_le16(conn->handle);
1205                 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
1206                              sizeof(cp), &cp);
1207
1208                 /* If we're already encrypted set the REAUTH_PEND flag,
1209                  * otherwise set the ENCRYPT_PEND.
1210                  */
1211                 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1212                         set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1213                 else
1214                         set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1215         }
1216
1217         return 0;
1218 }
1219
1220 /* Encrypt the link */
1221 static void hci_conn_encrypt(struct hci_conn *conn)
1222 {
1223         BT_DBG("hcon %pK", conn);
1224
1225         if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1226                 struct hci_cp_set_conn_encrypt cp;
1227                 cp.handle  = cpu_to_le16(conn->handle);
1228                 cp.encrypt = 0x01;
1229                 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1230                              &cp);
1231         }
1232 }
1233
1234 /* Enable security */
1235 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
1236                       bool initiator)
1237 {
1238         BT_DBG("hcon %pK", conn);
1239
1240         if (conn->type == LE_LINK)
1241                 return smp_conn_security(conn, sec_level);
1242
1243         /* For sdp we don't need the link key. */
1244         if (sec_level == BT_SECURITY_SDP)
1245                 return 1;
1246
1247         /* For non-2.1 devices and a low security level we don't need the
1248            link key. */
1249         if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
1250                 return 1;
1251
1252         /* For other security levels we need the link key. */
1253         if (!test_bit(HCI_CONN_AUTH, &conn->flags))
1254                 goto auth;
1255
1256         /* An authenticated FIPS approved combination key has sufficient
1257          * security for security level 4. */
1258         if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
1259             sec_level == BT_SECURITY_FIPS)
1260                 goto encrypt;
1261
1262         /* An authenticated combination key has sufficient security for
1263            security level 3. */
1264         if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
1265              conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
1266             sec_level == BT_SECURITY_HIGH)
1267                 goto encrypt;
1268
1269         /* An unauthenticated combination key has sufficient security for
1270            security level 1 and 2. */
1271         if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
1272              conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
1273             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
1274                 goto encrypt;
1275
1276         /* A combination key always has sufficient security for security
1277            levels 1 or 2. The high security level requires that the
1278            combination key was generated using the maximum PIN code
1279            length (16). For pre-2.1 units. */
1280         if (conn->key_type == HCI_LK_COMBINATION &&
1281             (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
1282              conn->pin_length == 16))
1283                 goto encrypt;
1284
1285 auth:
1286         if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
1287                 return 0;
1288
1289         if (initiator)
1290                 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1291
1292         if (!hci_conn_auth(conn, sec_level, auth_type))
1293                 return 0;
1294
1295 encrypt:
1296         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1297                 return 1;
1298
1299         hci_conn_encrypt(conn);
1300         return 0;
1301 }
1302 EXPORT_SYMBOL(hci_conn_security);
1303
1304 /* Check secure link requirement */
1305 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1306 {
1307         BT_DBG("hcon %pK", conn);
1308
1309         /* Accept if non-secure or higher security level is required */
1310         if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1311                 return 1;
1312
1313         /* Accept if secure or higher security level is already present */
1314         if (conn->sec_level == BT_SECURITY_HIGH ||
1315             conn->sec_level == BT_SECURITY_FIPS)
1316                 return 1;
1317
1318         /* Reject not secure link */
1319         return 0;
1320 }
1321 EXPORT_SYMBOL(hci_conn_check_secure);
1322
1323 /* Switch role */
1324 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1325 {
1326         BT_DBG("hcon %pK", conn);
1327
1328         if (role == conn->role)
1329                 return 1;
1330
1331         if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1332                 struct hci_cp_switch_role cp;
1333                 bacpy(&cp.bdaddr, &conn->dst);
1334                 cp.role = role;
1335                 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1336         }
1337
1338         return 0;
1339 }
1340 EXPORT_SYMBOL(hci_conn_switch_role);
1341
1342 /* Enter active mode */
1343 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
1344 {
1345         struct hci_dev *hdev = conn->hdev;
1346
1347         BT_DBG("hcon %pK mode %d", conn, conn->mode);
1348
1349         if (conn->mode != HCI_CM_SNIFF)
1350                 goto timer;
1351
1352         if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
1353                 goto timer;
1354
1355         if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
1356                 struct hci_cp_exit_sniff_mode cp;
1357                 cp.handle = cpu_to_le16(conn->handle);
1358                 hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
1359         }
1360
1361 timer:
1362         if (hdev->idle_timeout > 0)
1363                 queue_delayed_work(hdev->workqueue, &conn->idle_work,
1364                                    msecs_to_jiffies(hdev->idle_timeout));
1365 }
1366
1367 /* Drop all connection on the device */
1368 void hci_conn_hash_flush(struct hci_dev *hdev)
1369 {
1370         struct hci_conn_hash *h = &hdev->conn_hash;
1371         struct hci_conn *c, *n;
1372
1373         BT_DBG("hdev %s", hdev->name);
1374
1375         list_for_each_entry_safe(c, n, &h->list, list) {
1376                 c->state = BT_CLOSED;
1377
1378                 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1379                 hci_conn_del(c);
1380         }
1381 }
1382
1383 /* Check pending connect attempts */
1384 void hci_conn_check_pending(struct hci_dev *hdev)
1385 {
1386         struct hci_conn *conn;
1387
1388         BT_DBG("hdev %s", hdev->name);
1389
1390         hci_dev_lock(hdev);
1391
1392         conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1393         if (conn)
1394                 hci_acl_create_connection(conn);
1395
1396         hci_dev_unlock(hdev);
1397 }
1398
1399 static u32 get_link_mode(struct hci_conn *conn)
1400 {
1401         u32 link_mode = 0;
1402
1403         if (conn->role == HCI_ROLE_MASTER)
1404                 link_mode |= HCI_LM_MASTER;
1405
1406         if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1407                 link_mode |= HCI_LM_ENCRYPT;
1408
1409         if (test_bit(HCI_CONN_AUTH, &conn->flags))
1410                 link_mode |= HCI_LM_AUTH;
1411
1412         if (test_bit(HCI_CONN_SECURE, &conn->flags))
1413                 link_mode |= HCI_LM_SECURE;
1414
1415         if (test_bit(HCI_CONN_FIPS, &conn->flags))
1416                 link_mode |= HCI_LM_FIPS;
1417
1418         return link_mode;
1419 }
1420
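/* Copy a snapshot of the controller's active connections to user space,
 * bounded by the number of entries the caller asked for.
 */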
1421 int hci_get_conn_list(void __user *arg)
1422 {
1423         struct hci_conn *c;
1424         struct hci_conn_list_req req, *cl;
1425         struct hci_conn_info *ci;
1426         struct hci_dev *hdev;
1427         int n = 0, size, err;
1428
1429         if (copy_from_user(&req, arg, sizeof(req)))
1430                 return -EFAULT;
1431
1432         if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1433                 return -EINVAL;
1434
1435         size = sizeof(req) + req.conn_num * sizeof(*ci);
1436
1437         cl = kmalloc(size, GFP_KERNEL);
1438         if (!cl)
1439                 return -ENOMEM;
1440
1441         hdev = hci_dev_get(req.dev_id);
1442         if (!hdev) {
1443                 kfree(cl);
1444                 return -ENODEV;
1445         }
1446
1447         ci = cl->conn_info;
1448
1449         hci_dev_lock(hdev);
1450         list_for_each_entry(c, &hdev->conn_hash.list, list) {
1451                 bacpy(&(ci + n)->bdaddr, &c->dst);
1452                 (ci + n)->handle = c->handle;
1453                 (ci + n)->type  = c->type;
1454                 (ci + n)->out   = c->out;
1455                 (ci + n)->state = c->state;
1456                 (ci + n)->link_mode = get_link_mode(c);
1457                 if (++n >= req.conn_num)
1458                         break;
1459         }
1460         hci_dev_unlock(hdev);
1461
1462         cl->dev_id = hdev->id;
1463         cl->conn_num = n;
1464         size = sizeof(req) + n * sizeof(*ci);
1465
1466         hci_dev_put(hdev);
1467
1468         err = copy_to_user(arg, cl, size);
1469         kfree(cl);
1470
1471         return err ? -EFAULT : 0;
1472 }
1473
1474 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1475 {
1476         struct hci_conn_info_req req;
1477         struct hci_conn_info ci;
1478         struct hci_conn *conn;
1479         char __user *ptr = arg + sizeof(req);
1480
1481         if (copy_from_user(&req, arg, sizeof(req)))
1482                 return -EFAULT;
1483
1484         hci_dev_lock(hdev);
1485         conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1486         if (conn) {
1487                 bacpy(&ci.bdaddr, &conn->dst);
1488                 ci.handle = conn->handle;
1489                 ci.type  = conn->type;
1490                 ci.out   = conn->out;
1491                 ci.state = conn->state;
1492                 ci.link_mode = get_link_mode(conn);
1493         }
1494         hci_dev_unlock(hdev);
1495
1496         if (!conn)
1497                 return -ENOENT;
1498
1499         return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1500 }
1501
1502 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1503 {
1504         struct hci_auth_info_req req;
1505         struct hci_conn *conn;
1506
1507         if (copy_from_user(&req, arg, sizeof(req)))
1508                 return -EFAULT;
1509
1510         hci_dev_lock(hdev);
1511         conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1512         if (conn)
1513                 req.type = conn->auth_type;
1514         hci_dev_unlock(hdev);
1515
1516         if (!conn)
1517                 return -ENOENT;
1518
1519         return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1520 }
1521
1522 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1523 {
1524         struct hci_dev *hdev = conn->hdev;
1525         struct hci_chan *chan;
1526
1527         BT_DBG("%s hcon %pK", hdev->name, conn);
1528
1529         if (test_bit(HCI_CONN_DROP, &conn->flags)) {
1530                 BT_DBG("Refusing to create new hci_chan");
1531                 return NULL;
1532         }
1533
1534         chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1535         if (!chan)
1536                 return NULL;
1537
1538         chan->conn = hci_conn_get(conn);
1539         skb_queue_head_init(&chan->data_q);
1540         chan->state = BT_CONNECTED;
1541
1542         list_add_rcu(&chan->list, &conn->chan_list);
1543
1544         return chan;
1545 }
1546
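/* Remove a channel from its connection. Creation of new channels for this
 * hci_conn is blocked afterwards via the HCI_CONN_DROP flag, and the
 * channel's reference on the connection is dropped.
 */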
1547 void hci_chan_del(struct hci_chan *chan)
1548 {
1549         struct hci_conn *conn = chan->conn;
1550         struct hci_dev *hdev = conn->hdev;
1551
1552         BT_DBG("%s hcon %pK chan %pK", hdev->name, conn, chan);
1553
1554         list_del_rcu(&chan->list);
1555
1556         synchronize_rcu();
1557
1558         /* Prevent new hci_chan's from being created for this hci_conn */
1559         set_bit(HCI_CONN_DROP, &conn->flags);
1560
1561         hci_conn_put(conn);
1562
1563         skb_queue_purge(&chan->data_q);
1564         kfree(chan);
1565 }
1566
1567 void hci_chan_list_flush(struct hci_conn *conn)
1568 {
1569         struct hci_chan *chan, *n;
1570
1571         BT_DBG("hcon %pK", conn);
1572
1573         list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1574                 hci_chan_del(chan);
1575 }
1576
1577 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1578                                                  __u16 handle)
1579 {
1580         struct hci_chan *hchan;
1581
1582         list_for_each_entry(hchan, &hcon->chan_list, list) {
1583                 if (hchan->handle == handle)
1584                         return hchan;
1585         }
1586
1587         return NULL;
1588 }
1589
1590 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1591 {
1592         struct hci_conn_hash *h = &hdev->conn_hash;
1593         struct hci_conn *hcon;
1594         struct hci_chan *hchan = NULL;
1595
1596         rcu_read_lock();
1597
1598         list_for_each_entry_rcu(hcon, &h->list, list) {
1599                 hchan = __hci_chan_lookup_handle(hcon, handle);
1600                 if (hchan)
1601                         break;
1602         }
1603
1604         rcu_read_unlock();
1605
1606         return hchan;
1607 }