/* net/bluetooth/hci_sock.c — from kernel_xiaomi_msm8998 (OSDN mirror).
 * Note: web-viewer residue removed; the "cifs" commit title above this
 * file in the scrape was unrelated to this source.
 */
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_mon.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "mgmt_util.h"
36
/* Registered HCI management channels; protected by mgmt_chan_list_lock. */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Count of open monitor sockets; traffic is only mirrored to the
 * monitor channel while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
        struct bt_sock    bt;        /* must stay first: generic BT socket */
        struct hci_dev    *hdev;     /* bound controller, or NULL */
        struct hci_filter filter;    /* RAW-channel receive filter */
        __u32             cmsg_mask; /* HCI_CMSG_* ancillary data to emit */
        unsigned short    channel;   /* HCI_CHANNEL_* bound at bind() time */
        unsigned long     flags;     /* HCI_SOCK_* / HCI_MGMT_* flag bits */
};
55
56 void hci_sock_set_flag(struct sock *sk, int nr)
57 {
58         set_bit(nr, &hci_pi(sk)->flags);
59 }
60
61 void hci_sock_clear_flag(struct sock *sk, int nr)
62 {
63         clear_bit(nr, &hci_pi(sk)->flags);
64 }
65
66 int hci_sock_test_flag(struct sock *sk, int nr)
67 {
68         return test_bit(nr, &hci_pi(sk)->flags);
69 }
70
71 unsigned short hci_sock_get_channel(struct sock *sk)
72 {
73         return hci_pi(sk)->channel;
74 }
75
76 static inline int hci_test_bit(int nr, const void *addr)
77 {
78         return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
79 }
80
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

/* Bitmaps describing what an unprivileged RAW socket may see/send:
 * allowed packet types, allowed events, and allowed commands keyed by
 * OGF (row) with the OCF bit set in the 4x32-bit column mask.
 */
struct hci_sec_filter {
        __u32 type_mask;
        __u32 event_mask[2];
        __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
89
/* Default security filter applied to unprivileged RAW sockets.
 * The magic bitmaps whitelist only "safe" packet types, events and
 * commands (values taken from the BlueZ security policy).
 */
static const struct hci_sec_filter hci_sec_filter = {
        /* Packet types */
        0x10,
        /* Events */
        { 0x1000d9fe, 0x0000b00c },
        /* Commands */
        {
                { 0x0 },
                /* OGF_LINK_CTL */
                { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
                /* OGF_LINK_POLICY */
                { 0x00005200, 0x00000000, 0x00000000, 0x00 },
                /* OGF_HOST_CTL */
                { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
                /* OGF_INFO_PARAM */
                { 0x000002be, 0x00000000, 0x00000000, 0x00 },
                /* OGF_STATUS_PARAM */
                { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
        }
};
110
/* All open HCI sockets, guarded by the embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
114
115 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
116 {
117         struct hci_filter *flt;
118         int flt_type, flt_event;
119
120         /* Apply filter */
121         flt = &hci_pi(sk)->filter;
122
123         flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
124
125         if (!test_bit(flt_type, &flt->type_mask))
126                 return true;
127
128         /* Extra filter for event packets only */
129         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
130                 return false;
131
132         flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
133
134         if (!hci_test_bit(flt_event, &flt->event_mask))
135                 return true;
136
137         /* Check filter only when opcode is set */
138         if (!flt->opcode)
139                 return false;
140
141         if (flt_event == HCI_EV_CMD_COMPLETE &&
142             flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
143                 return true;
144
145         if (flt_event == HCI_EV_CMD_STATUS &&
146             flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
147                 return true;
148
149         return false;
150 }
151
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sock *sk;
        struct sk_buff *skb_copy = NULL;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        read_lock(&hci_sk_list.lock);

        sk_for_each(sk, &hci_sk_list.head) {
                struct sk_buff *nskb;

                /* Only deliver to sockets bound to this controller */
                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
                        continue;

                /* Don't send frame to the socket it came from */
                if (skb->sk == sk)
                        continue;

                if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
                        /* RAW sockets see only the four classic packet
                         * types, subject to the per-socket filter.
                         */
                        if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
                            bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                        if (is_filtered_packet(sk, skb))
                                continue;
                } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        /* USER channel only mirrors incoming traffic */
                        if (!bt_cb(skb)->incoming)
                                continue;
                        if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
                            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
                            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
                } else {
                        /* Don't send frame to other channel types */
                        continue;
                }

                if (!skb_copy) {
                        /* Create a private copy with headroom, done at
                         * most once and then cloned per receiver below.
                         */
                        skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
                        if (!skb_copy)
                                continue;

                        /* Put type byte before the data */
                        memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
                }

                nskb = skb_clone(skb_copy, GFP_ATOMIC);
                if (!nskb)
                        continue;

                /* sock_queue_rcv_skb() takes ownership on success only */
                if (sock_queue_rcv_skb(sk, nskb))
                        kfree_skb(nskb);
        }

        read_unlock(&hci_sk_list.lock);

        /* Drop our reference to the shared copy (clones keep the data) */
        kfree_skb(skb_copy);
}
214
215 /* Send frame to sockets with specific channel */
216 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
217                          int flag, struct sock *skip_sk)
218 {
219         struct sock *sk;
220
221         BT_DBG("channel %u len %d", channel, skb->len);
222
223         read_lock(&hci_sk_list.lock);
224
225         sk_for_each(sk, &hci_sk_list.head) {
226                 struct sk_buff *nskb;
227
228                 /* Ignore socket without the flag set */
229                 if (!hci_sock_test_flag(sk, flag))
230                         continue;
231
232                 /* Skip the original socket */
233                 if (sk == skip_sk)
234                         continue;
235
236                 if (sk->sk_state != BT_BOUND)
237                         continue;
238
239                 if (hci_pi(sk)->channel != channel)
240                         continue;
241
242                 nskb = skb_clone(skb, GFP_ATOMIC);
243                 if (!nskb)
244                         continue;
245
246                 if (sock_queue_rcv_skb(sk, nskb))
247                         kfree_skb(nskb);
248         }
249
250         read_unlock(&hci_sk_list.lock);
251 }
252
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct sk_buff *skb_copy = NULL;
        struct hci_mon_hdr *hdr;
        __le16 opcode;

        /* Fast path out when no monitor socket is open */
        if (!atomic_read(&monitor_promisc))
                return;

        BT_DBG("hdev %p len %d", hdev, skb->len);

        /* Map HCI packet type (+ direction for data) to monitor opcode */
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
                break;
        case HCI_EVENT_PKT:
                opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
                break;
        case HCI_ACLDATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
                break;
        case HCI_SCODATA_PKT:
                if (bt_cb(skb)->incoming)
                        opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
                else
                        opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
                break;
        case HCI_DIAG_PKT:
                opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
                break;
        default:
                return;
        }

        /* Create a private copy with headroom */
        skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
        if (!skb_copy)
                return;

        /* Put header before the data */
        hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len);

        hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
                            HCI_SOCK_TRUSTED, NULL);
        kfree_skb(skb_copy);
}
306
/* Build a monitor-channel skb describing controller lifecycle @event
 * for @hdev, with the hci_mon_hdr already pushed. Returns NULL on
 * allocation failure, unknown event, or when the event should be
 * suppressed (HCI_DEV_SETUP with no manufacturer known yet).
 * Caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
        struct hci_mon_hdr *hdr;
        struct hci_mon_new_index *ni;
        struct hci_mon_index_info *ii;
        struct sk_buff *skb;
        __le16 opcode;

        switch (event) {
        case HCI_DEV_REG:
                skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
                /* NOTE(review): fixed 8-byte copy assumes hdev->name is
                 * char[8]; newer kernels pad/truncate explicitly here.
                 */
                memcpy(ni->name, hdev->name, 8);

                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;

        case HCI_DEV_UNREG:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
                break;

        case HCI_DEV_SETUP:
                /* Suppress until the manufacturer is actually known */
                if (hdev->manufacturer == 0xffff)
                        return NULL;

                /* fall through */

        case HCI_DEV_UP:
                skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
                bacpy(&ii->bdaddr, &hdev->bdaddr);
                ii->manufacturer = cpu_to_le16(hdev->manufacturer);

                opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
                break;

        case HCI_DEV_OPEN:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
                break;

        case HCI_DEV_CLOSE:
                skb = bt_skb_alloc(0, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
                break;

        default:
                return NULL;
        }

        __net_timestamp(skb);

        /* Prepend the monitor header; len excludes the header itself */
        hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
        hdr->opcode = opcode;
        hdr->index = cpu_to_le16(hdev->id);
        hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

        return skb;
}
385
/* Replay the current controller state to a freshly-bound monitor
 * socket: one NEW_INDEX per device, plus OPEN/UP/SETUP info events
 * matching each device's present state.
 */
static void send_monitor_replay(struct sock *sk)
{
        struct hci_dev *hdev;

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(hdev, &hci_dev_list, list) {
                struct sk_buff *skb;

                skb = create_monitor_event(hdev, HCI_DEV_REG);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);

                /* Only report open/up state for running transports */
                if (!test_bit(HCI_RUNNING, &hdev->flags))
                        continue;

                skb = create_monitor_event(hdev, HCI_DEV_OPEN);
                if (!skb)
                        continue;

                if (sock_queue_rcv_skb(sk, skb))
                        kfree_skb(skb);

                if (test_bit(HCI_UP, &hdev->flags))
                        skb = create_monitor_event(hdev, HCI_DEV_UP);
                else if (hci_dev_test_flag(hdev, HCI_SETUP))
                        skb = create_monitor_event(hdev, HCI_DEV_SETUP);
                else
                        skb = NULL;

                if (skb) {
                        if (sock_queue_rcv_skb(sk, skb))
                                kfree_skb(skb);
                }
        }

        read_unlock(&hci_dev_list_lock);
}
427
428 /* Generate internal stack event */
429 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
430 {
431         struct hci_event_hdr *hdr;
432         struct hci_ev_stack_internal *ev;
433         struct sk_buff *skb;
434
435         skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
436         if (!skb)
437                 return;
438
439         hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
440         hdr->evt  = HCI_EV_STACK_INTERNAL;
441         hdr->plen = sizeof(*ev) + dlen;
442
443         ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
444         ev->type = type;
445         memcpy(ev->data, data, dlen);
446
447         bt_cb(skb)->incoming = 1;
448         __net_timestamp(skb);
449
450         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
451         hci_send_to_sock(hdev, skb);
452         kfree_skb(skb);
453 }
454
455 void hci_sock_dev_event(struct hci_dev *hdev, int event)
456 {
457         BT_DBG("hdev %s event %d", hdev->name, event);
458
459         if (atomic_read(&monitor_promisc)) {
460                 struct sk_buff *skb;
461
462                 /* Send event to monitor */
463                 skb = create_monitor_event(hdev, event);
464                 if (skb) {
465                         hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
466                                             HCI_SOCK_TRUSTED, NULL);
467                         kfree_skb(skb);
468                 }
469         }
470
471         if (event <= HCI_DEV_DOWN) {
472                 struct hci_ev_si_device ev;
473
474                 /* Send event to sockets */
475                 ev.event  = event;
476                 ev.dev_id = hdev->id;
477                 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
478         }
479
480         if (event == HCI_DEV_UNREG) {
481                 struct sock *sk;
482
483                 /* Detach sockets from device */
484                 read_lock(&hci_sk_list.lock);
485                 sk_for_each(sk, &hci_sk_list.head) {
486                         bh_lock_sock_nested(sk);
487                         if (hci_pi(sk)->hdev == hdev) {
488                                 hci_pi(sk)->hdev = NULL;
489                                 sk->sk_err = EPIPE;
490                                 sk->sk_state = BT_OPEN;
491                                 sk->sk_state_change(sk);
492
493                                 hci_dev_put(hdev);
494                         }
495                         bh_unlock_sock(sk);
496                 }
497                 read_unlock(&hci_sk_list.lock);
498         }
499 }
500
501 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
502 {
503         struct hci_mgmt_chan *c;
504
505         list_for_each_entry(c, &mgmt_chan_list, list) {
506                 if (c->channel == channel)
507                         return c;
508         }
509
510         return NULL;
511 }
512
513 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
514 {
515         struct hci_mgmt_chan *c;
516
517         mutex_lock(&mgmt_chan_list_lock);
518         c = __hci_mgmt_chan_find(channel);
519         mutex_unlock(&mgmt_chan_list_lock);
520
521         return c;
522 }
523
524 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
525 {
526         if (c->channel < HCI_CHANNEL_CONTROL)
527                 return -EINVAL;
528
529         mutex_lock(&mgmt_chan_list_lock);
530         if (__hci_mgmt_chan_find(c->channel)) {
531                 mutex_unlock(&mgmt_chan_list_lock);
532                 return -EALREADY;
533         }
534
535         list_add_tail(&c->list, &mgmt_chan_list);
536
537         mutex_unlock(&mgmt_chan_list_lock);
538
539         return 0;
540 }
541 EXPORT_SYMBOL(hci_mgmt_chan_register);
542
543 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
544 {
545         mutex_lock(&mgmt_chan_list_lock);
546         list_del(&c->list);
547         mutex_unlock(&mgmt_chan_list_lock);
548 }
549 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
550
/* Release an HCI socket: undo monitor accounting, unlink from the
 * global socket list, drop the controller reference (shutting down a
 * user-channel controller first), then free queued skbs and the sock.
 */
static int hci_sock_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct hci_dev *hdev;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!sk)
                return 0;

        if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
                atomic_dec(&monitor_promisc);

        bt_sock_unlink(&hci_sk_list, sk);

        hdev = hci_pi(sk)->hdev;
        if (hdev) {
                if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
                        /* When releasing an user channel exclusive access,
                         * call hci_dev_do_close directly instead of calling
                         * hci_dev_close to ensure the exclusive access will
                         * be released and the controller brought back down.
                         *
                         * The checking of HCI_AUTO_OFF is not needed in this
                         * case since it will have been cleared already when
                         * opening the user channel.
                         */
                        hci_dev_do_close(hdev);
                        hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                        mgmt_index_added(hdev);
                }

                /* Balance the atomic_inc done at bind time */
                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }

        sock_orphan(sk);

        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);

        sock_put(sk);
        return 0;
}
595
596 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
597 {
598         bdaddr_t bdaddr;
599         int err;
600
601         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
602                 return -EFAULT;
603
604         hci_dev_lock(hdev);
605
606         err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
607
608         hci_dev_unlock(hdev);
609
610         return err;
611 }
612
613 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
614 {
615         bdaddr_t bdaddr;
616         int err;
617
618         if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
619                 return -EFAULT;
620
621         hci_dev_lock(hdev);
622
623         err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
624
625         hci_dev_unlock(hdev);
626
627         return err;
628 }
629
/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                                unsigned long arg)
{
        struct hci_dev *hdev = hci_pi(sk)->hdev;

        if (!hdev)
                return -EBADFD;

        /* Reject while a user channel holds exclusive access */
        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                return -EOPNOTSUPP;

        /* These legacy ioctls only make sense for BR/EDR controllers */
        if (hdev->dev_type != HCI_BREDR)
                return -EOPNOTSUPP;

        switch (cmd) {
        case HCISETRAW:
                /* Raw mode was removed; still gate on CAP_NET_ADMIN so
                 * unprivileged callers get -EPERM, not -EOPNOTSUPP.
                 */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return -EOPNOTSUPP;

        case HCIGETCONNINFO:
                return hci_get_conn_info(hdev, (void __user *) arg);

        case HCIGETAUTHINFO:
                return hci_get_auth_info(hdev, (void __user *) arg);

        case HCIBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_add(hdev, (void __user *) arg);

        case HCIUNBLOCKADDR:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
        }

        return -ENOIOCTLCMD;
}
673
/* Top-level ioctl handler for HCI sockets.
 *
 * The socket lock is taken only to validate the channel; device-level
 * ioctls then run unlocked (they take their own locks and may sleep),
 * while the bound-socket ioctls at the bottom re-take the lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
{
        void __user *argp = (void __user *) arg;
        struct sock *sk = sock->sk;
        int err;

        BT_DBG("cmd %x arg %lx", cmd, arg);

        lock_sock(sk);

        /* Only RAW channel sockets accept these ioctls */
        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
                err = -EBADFD;
                goto done;
        }

        release_sock(sk);

        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);

        case HCIGETDEVINFO:
                return hci_get_dev_info(argp);

        case HCIGETCONNLIST:
                return hci_get_conn_list(argp);

        case HCIDEVUP:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_open(arg);

        case HCIDEVDOWN:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_close(arg);

        case HCIDEVRESET:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset(arg);

        case HCIDEVRESTAT:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_reset_stat(arg);

        case HCISETSCAN:
        case HCISETAUTH:
        case HCISETENCRYPT:
        case HCISETPTYPE:
        case HCISETLINKPOL:
        case HCISETLINKMODE:
        case HCISETACLMTU:
        case HCISETSCOMTU:
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_dev_cmd(cmd, argp);

        case HCIINQUIRY:
                return hci_inquiry(argp);
        }

        /* Remaining commands need the socket bound to a device */
        lock_sock(sk);

        err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
        release_sock(sk);
        return err;
}
746
/* Bind an HCI socket to a channel (and, for RAW/USER, to a device).
 *
 * RAW:     optional device; takes a device reference and bumps promisc.
 * USER:    exclusive device access (CAP_NET_ADMIN); removes the device
 *          from mgmt control and powers it up.
 * MONITOR: deviceless snoop channel (CAP_NET_RAW); replays current
 *          controller state on bind.
 * other:   a registered management channel (e.g. mgmt).
 *
 * On success the socket moves to BT_BOUND with its channel recorded.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                         int addr_len)
{
        struct sockaddr_hci haddr;
        struct sock *sk = sock->sk;
        struct hci_dev *hdev = NULL;
        int len, err = 0;

        BT_DBG("sock %p sk %p", sock, sk);

        if (!addr)
                return -EINVAL;

        /* Copy at most sizeof(haddr); shorter addresses are zero-padded */
        memset(&haddr, 0, sizeof(haddr));
        len = min_t(unsigned int, sizeof(haddr), addr_len);
        memcpy(&haddr, addr, len);

        if (haddr.hci_family != AF_BLUETOOTH)
                return -EINVAL;

        lock_sock(sk);

        if (sk->sk_state == BT_BOUND) {
                err = -EALREADY;
                goto done;
        }

        switch (haddr.hci_channel) {
        case HCI_CHANNEL_RAW:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        hdev = hci_dev_get(haddr.hci_dev);
                        if (!hdev) {
                                err = -ENODEV;
                                goto done;
                        }

                        atomic_inc(&hdev->promisc);
                }

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_USER:
                if (hci_pi(sk)->hdev) {
                        err = -EALREADY;
                        goto done;
                }

                /* User channel requires a concrete device */
                if (haddr.hci_dev == HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto done;
                }

                hdev = hci_dev_get(haddr.hci_dev);
                if (!hdev) {
                        err = -ENODEV;
                        goto done;
                }

                /* Refuse devices that are initializing, being configured,
                 * or already up (unless only kept up by the auto-off
                 * grace period).
                 */
                if (test_bit(HCI_INIT, &hdev->flags) ||
                    hci_dev_test_flag(hdev, HCI_SETUP) ||
                    hci_dev_test_flag(hdev, HCI_CONFIG) ||
                    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
                     test_bit(HCI_UP, &hdev->flags))) {
                        err = -EBUSY;
                        hci_dev_put(hdev);
                        goto done;
                }

                /* Claim exclusive access; fails if someone beat us to it */
                if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
                        err = -EUSERS;
                        hci_dev_put(hdev);
                        goto done;
                }

                mgmt_index_removed(hdev);

                err = hci_dev_open(hdev->id);
                if (err) {
                        if (err == -EALREADY) {
                                /* In case the transport is already up and
                                 * running, clear the error here.
                                 *
                                 * This can happen when opening an user
                                 * channel and HCI_AUTO_OFF grace period
                                 * is still active.
                                 */
                                err = 0;
                        } else {
                                /* Roll back the exclusive claim */
                                hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
                                mgmt_index_added(hdev);
                                hci_dev_put(hdev);
                                goto done;
                        }
                }

                atomic_inc(&hdev->promisc);

                hci_pi(sk)->hdev = hdev;
                break;

        case HCI_CHANNEL_MONITOR:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                if (!capable(CAP_NET_RAW)) {
                        err = -EPERM;
                        goto done;
                }

                /* The monitor interface is restricted to CAP_NET_RAW
                 * capabilities and with that implicitly trusted.
                 */
                hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

                send_monitor_replay(sk);

                atomic_inc(&monitor_promisc);
                break;

        default:
                if (!hci_mgmt_chan_find(haddr.hci_channel)) {
                        err = -EINVAL;
                        goto done;
                }

                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
                        goto done;
                }

                /* Users with CAP_NET_ADMIN capabilities are allowed
                 * access to all management commands and events. For
                 * untrusted users the interface is restricted and
                 * also only untrusted events are sent.
                 */
                if (capable(CAP_NET_ADMIN))
                        hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

                /* At the moment the index and unconfigured index events
                 * are enabled unconditionally. Setting them on each
                 * socket when binding keeps this functionality. They
                 * however might be cleared later and then sending of these
                 * events will be disabled, but that is then intentional.
                 *
                 * This also enables generic events that are safe to be
                 * received by untrusted users. Example for such events
                 * are changes to settings, class of device, name etc.
                 */
                if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
                        hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
                        hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
                        hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
                }
                break;
        }


        hci_pi(sk)->channel = haddr.hci_channel;
        sk->sk_state = BT_BOUND;

done:
        release_sock(sk);
        return err;
}
924
925 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
926                             int *addr_len, int peer)
927 {
928         struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
929         struct sock *sk = sock->sk;
930         struct hci_dev *hdev;
931         int err = 0;
932
933         BT_DBG("sock %p sk %p", sock, sk);
934
935         if (peer)
936                 return -EOPNOTSUPP;
937
938         lock_sock(sk);
939
940         hdev = hci_pi(sk)->hdev;
941         if (!hdev) {
942                 err = -EBADFD;
943                 goto done;
944         }
945
946         *addr_len = sizeof(*haddr);
947         haddr->hci_family = AF_BLUETOOTH;
948         haddr->hci_dev    = hdev->id;
949         haddr->hci_channel= hci_pi(sk)->channel;
950
951 done:
952         release_sock(sk);
953         return err;
954 }
955
/* Attach ancillary data (control messages) to a received HCI frame.
 *
 * Which cmsgs are produced is governed by the per-socket cmsg_mask,
 * set via the HCI_DATA_DIR and HCI_TIME_STAMP socket options:
 *  - HCI_CMSG_DIR:    frame direction (incoming vs outgoing)
 *  - HCI_CMSG_TSTAMP: frame timestamp taken from the skb
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks on a 64-bit kernel expect a compat-sized
		 * timeval unless the ABI shares the 64-bit time layout.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
992
993 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
994                             int flags)
995 {
996         int noblock = flags & MSG_DONTWAIT;
997         struct sock *sk = sock->sk;
998         struct sk_buff *skb;
999         int copied, err;
1000
1001         BT_DBG("sock %p, sk %p", sock, sk);
1002
1003         if (flags & MSG_OOB)
1004                 return -EOPNOTSUPP;
1005
1006         if (sk->sk_state == BT_CLOSED)
1007                 return 0;
1008
1009         skb = skb_recv_datagram(sk, flags, noblock, &err);
1010         if (!skb)
1011                 return err;
1012
1013         copied = skb->len;
1014         if (len < copied) {
1015                 msg->msg_flags |= MSG_TRUNC;
1016                 copied = len;
1017         }
1018
1019         skb_reset_transport_header(skb);
1020         err = skb_copy_datagram_msg(skb, 0, msg, copied);
1021
1022         switch (hci_pi(sk)->channel) {
1023         case HCI_CHANNEL_RAW:
1024                 hci_sock_cmsg(sk, msg, skb);
1025                 break;
1026         case HCI_CHANNEL_USER:
1027         case HCI_CHANNEL_MONITOR:
1028                 sock_recv_timestamp(msg, sk, skb);
1029                 break;
1030         default:
1031                 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1032                         sock_recv_timestamp(msg, sk, skb);
1033                 break;
1034         }
1035
1036         skb_free_datagram(sk, skb);
1037
1038         return err ? : copied;
1039 }
1040
/* Parse and dispatch one management command from a sendmsg() payload.
 *
 * The payload is a struct mgmt_hdr (opcode, index, len) followed by
 * len bytes of command parameters. Validation order matters: header
 * size, declared vs actual length, opcode range, socket trust level,
 * device index state, hdev-vs-no-hdev handler expectation, and finally
 * parameter length. Each rejection is reported back to the sender via
 * mgmt_cmd_status() with a matching MGMT_STATUS_* code.
 *
 * Returns msglen on success, a negative errno on internal failure.
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's declared length must account for exactly the
	 * bytes that follow it -- no more, no less.
	 */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue handlers explicitly marked
	 * HCI_MGMT_UNTRUSTED.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		/* Takes a reference; dropped in the done path below. */
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Devices still being set up/configured, or claimed by
		 * a user channel, are not addressable via management.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handler and caller must agree on whether a device index is
	 * expected: no_hdev handlers reject an index, others demand one.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands need at least data_len bytes of
	 * parameters; fixed-length commands need exactly data_len.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	/* Success: report the number of bytes consumed. */
	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1151
/* Send one HCI frame (or management command) on the socket.
 *
 * Routing depends on the bound channel:
 *  - RAW/USER:    the payload (packet type byte + frame) is queued to
 *                 the controller; RAW additionally passes the security
 *                 filter for unprivileged senders.
 *  - MONITOR:     write is not supported.
 *  - mgmt chans:  the payload is handed to hci_mgmt_cmd().
 *
 * Returns the number of bytes accepted, or a negative errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum 4 bytes: enough for a packet type byte plus the
	 * smallest HCI packet header.
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Management channels: look up the handler table under
		 * the channel list lock and dispatch the command.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	/* May block waiting for socket send buffer space unless
	 * MSG_DONTWAIT was given; on failure err is already set.
	 */
	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte of the payload carries the H:4 packet type. */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Raw-channel commands must either be allowed by the
		 * security filter or come from a CAP_NET_RAW sender.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands (OGF 0x3f) bypass the
		 * command queue and go straight to the transport.
		 */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Injecting data packets on a raw socket requires
		 * CAP_NET_RAW and a valid data packet type.
		 */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1283
/* Set SOL_HCI socket options (raw channel only).
 *
 * Supported options:
 *  - HCI_DATA_DIR:   toggle direction cmsgs on received frames
 *  - HCI_TIME_STAMP: toggle timestamp cmsgs on received frames
 *  - HCI_FILTER:     install a packet/event filter; senders without
 *                    CAP_NET_RAW are clamped to the security filter
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		/* Pre-fill uf with the currently installed filter so
		 * that a short copy_from_user below leaves the
		 * remaining fields at their present values instead of
		 * uninitialized stack data.
		 */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged users may only narrow the filter down
		 * to what the security filter already permits.
		 */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		/* Commit the (possibly clamped) filter to the socket. */
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}
1366
1367 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1368                                char __user *optval, int __user *optlen)
1369 {
1370         struct hci_ufilter uf;
1371         struct sock *sk = sock->sk;
1372         int len, opt, err = 0;
1373
1374         BT_DBG("sk %p, opt %d", sk, optname);
1375
1376         if (get_user(len, optlen))
1377                 return -EFAULT;
1378
1379         lock_sock(sk);
1380
1381         if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1382                 err = -EBADFD;
1383                 goto done;
1384         }
1385
1386         switch (optname) {
1387         case HCI_DATA_DIR:
1388                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1389                         opt = 1;
1390                 else
1391                         opt = 0;
1392
1393                 if (put_user(opt, optval))
1394                         err = -EFAULT;
1395                 break;
1396
1397         case HCI_TIME_STAMP:
1398                 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1399                         opt = 1;
1400                 else
1401                         opt = 0;
1402
1403                 if (put_user(opt, optval))
1404                         err = -EFAULT;
1405                 break;
1406
1407         case HCI_FILTER:
1408                 {
1409                         struct hci_filter *f = &hci_pi(sk)->filter;
1410
1411                         memset(&uf, 0, sizeof(uf));
1412                         uf.type_mask = f->type_mask;
1413                         uf.opcode    = f->opcode;
1414                         uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1415                         uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1416                 }
1417
1418                 len = min_t(unsigned int, len, sizeof(uf));
1419                 if (copy_to_user(optval, &uf, len))
1420                         err = -EFAULT;
1421                 break;
1422
1423         default:
1424                 err = -ENOPROTOOPT;
1425                 break;
1426         }
1427
1428 done:
1429         release_sock(sk);
1430         return err;
1431 }
1432
/* Socket operations for PF_BLUETOOTH/BTPROTO_HCI sockets. HCI is a
 * connectionless raw interface, so connect/listen/accept/socketpair
 * are wired to the sock_no_* stubs.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
1452
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
1458
1459 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1460                            int kern)
1461 {
1462         struct sock *sk;
1463
1464         BT_DBG("sock %p", sock);
1465
1466         if (sock->type != SOCK_RAW)
1467                 return -ESOCKTNOSUPPORT;
1468
1469         sock->ops = &hci_sock_ops;
1470
1471         sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
1472         if (!sk)
1473                 return -ENOMEM;
1474
1475         sock_init_data(sock, sk);
1476
1477         sock_reset_flag(sk, SOCK_ZAPPED);
1478
1479         sk->sk_protocol = protocol;
1480
1481         sock->state = SS_UNCONNECTED;
1482         sk->sk_state = BT_OPEN;
1483
1484         bt_sock_link(&hci_sk_list, sk);
1485         return 0;
1486 }
1487
/* Family registration entry for BTPROTO_HCI under PF_BLUETOOTH. */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
1493
1494 int __init hci_sock_init(void)
1495 {
1496         int err;
1497
1498         BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
1499
1500         err = proto_register(&hci_sk_proto, 0);
1501         if (err < 0)
1502                 return err;
1503
1504         err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1505         if (err < 0) {
1506                 BT_ERR("HCI socket registration failed");
1507                 goto error;
1508         }
1509
1510         err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1511         if (err < 0) {
1512                 BT_ERR("Failed to create HCI proc file");
1513                 bt_sock_unregister(BTPROTO_HCI);
1514                 goto error;
1515         }
1516
1517         BT_INFO("HCI socket layer initialized");
1518
1519         return 0;
1520
1521 error:
1522         proto_unregister(&hci_sk_proto);
1523         return err;
1524 }
1525
/* Tear down the HCI socket layer in reverse order of hci_sock_init():
 * proc entry, socket family, then the protocol itself.
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}