/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

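/* Synchronous requests use a small handshake on hdev: the submitting
 * thread sets req_status to HCI_REQ_PEND and sleeps on req_wait_q;
 * this completion callback (or hci_req_cancel() below) records the
 * outcome in req_result, updates req_status and wakes the sleeper.
 */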
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

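/* Send a single HCI command and sleep until hci_req_sync_complete()
 * runs or the timeout expires, then hand back the resulting event skb
 * via hci_get_cmd_complete(). A non-zero 'event' waits for that
 * specific event instead of the Command Complete event.
 */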
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
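/* Callers must hold hdev->req_lock; external users go through
 * hci_req_sync() below, which takes the lock. The 'func' callback
 * queues commands on the request with hci_req_add(). An illustrative
 * call (not taken from this file) would be:
 *
 *      err = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */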
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

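/* The helpers below are request builders: each receives a struct
 * hci_request plus an opaque 'opt' argument and queues one or more
 * commands with hci_req_add(), to be run synchronously through
 * hci_req_sync() or __hci_req_sync().
 */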
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

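/* Inquiry mode values as written by hci_setup_inquiry_mode() below:
 * 0x00 is standard inquiry results, 0x01 results with RSSI and 0x02
 * results with RSSI or the extended format. The manufacturer/revision
 * checks in hci_get_inquiry_mode() force 0x01 for known controllers
 * whose feature bits fail to advertise the RSSI support they have.
 */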
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

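/* The Set Event Mask parameter is a 64-bit little-endian bitfield
 * where bit N enables the event with code N + 1; e.g. events[4] bit 0
 * is bit 32 overall and enables Flow Specification Complete (0x21).
 */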
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

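/* Controller bring-up runs up to three synchronous request stages:
 * init1 resets the controller and reads its basic identity, init2
 * performs transport-specific setup (BR/EDR and/or LE) including the
 * event mask, and init3 adds link policy, LE host support and the
 * remaining extended feature pages.
 */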
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

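/* Merge a new inquiry result into the discovery cache, creating the
 * entry if needed. The return value tells the caller whether the
 * device can be reported with a known name: false when the entry is
 * still in the NAME_NOT_KNOWN state (or on allocation failure), true
 * otherwise.
 */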
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

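/* Advertising data is built as a sequence of structures, each laid
 * out as [length][type][data] where length covers the type byte plus
 * the data; e.g. a flags structure is three bytes in total: 0x02,
 * EIR_FLAGS, flags.
 */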
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

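/* Tear-down counterpart of hci_dev_open(): pending works are cancelled
 * or flushed, timers and queues drained, and an HCI Reset is issued
 * when HCI_QUIRK_RESET_ON_CLOSE requires it, before the transport is
 * finally closed so nothing can touch the driver after hdev->close().
 */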
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

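/* Backs the HCIGETDEVLIST ioctl: read the requested device count from
 * user space, then return up to that many (dev_id, flags) pairs, one
 * per registered controller.
 */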
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

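/* Store or update a BR/EDR link key. When new_key is set, user space
 * is notified through mgmt_new_link_key() and hci_persistent_key()
 * above decides whether the key survives beyond the connection (a
 * non-persistent key is flushed once the connection drops).
 */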
1715 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1716                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1717 {
1718         struct link_key *key, *old_key;
1719         u8 old_key_type;
1720         bool persistent;
1721
1722         old_key = hci_find_link_key(hdev, bdaddr);
1723         if (old_key) {
1724                 old_key_type = old_key->type;
1725                 key = old_key;
1726         } else {
1727                 old_key_type = conn ? conn->key_type : 0xff;
1728                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1729                 if (!key)
1730                         return -ENOMEM;
1731                 list_add(&key->list, &hdev->link_keys);
1732         }
1733
1734         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1735
1736         /* Some buggy controller combinations generate a changed
1737          * combination key for legacy pairing even when there's no
1738          * previous key */
1739         if (type == HCI_LK_CHANGED_COMBINATION &&
1740             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1741                 type = HCI_LK_COMBINATION;
1742                 if (conn)
1743                         conn->key_type = type;
1744         }
1745
1746         bacpy(&key->bdaddr, bdaddr);
1747         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1748         key->pin_len = pin_len;
1749
1750         if (type == HCI_LK_CHANGED_COMBINATION)
1751                 key->type = old_key_type;
1752         else
1753                 key->type = type;
1754
1755         if (!new_key)
1756                 return 0;
1757
1758         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1759
1760         mgmt_new_link_key(hdev, key, persistent);
1761
1762         if (conn)
1763                 conn->flush_key = !persistent;
1764
1765         return 0;
1766 }
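/* Editorial sketch (not part of the original file): how the Link Key
 * Notification event handler typically feeds a new key into this
 * store.  The ev pointer and pin_len argument are hypothetical locals
 * of the caller.
 */
static inline int example_store_link_key(struct hci_dev *hdev,
					 struct hci_conn *conn,
					 struct hci_ev_link_key_notify *ev,
					 u8 pin_len)
{
	return hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				ev->key_type, pin_len);
}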
1767
1768 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1769                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1770                 ediv, u8 rand[8])
1771 {
1772         struct smp_ltk *key, *old_key;
1773
1774         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1775                 return 0;
1776
1777         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1778         if (old_key)
1779                 key = old_key;
1780         else {
1781                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1782                 if (!key)
1783                         return -ENOMEM;
1784                 list_add(&key->list, &hdev->long_term_keys);
1785         }
1786
1787         bacpy(&key->bdaddr, bdaddr);
1788         key->bdaddr_type = addr_type;
1789         memcpy(key->val, tk, sizeof(key->val));
1790         key->authenticated = authenticated;
1791         key->ediv = ediv;
1792         key->enc_size = enc_size;
1793         key->type = type;
1794         memcpy(key->rand, rand, sizeof(key->rand));
1795
1796         if (!new_key)
1797                 return 0;
1798
1799         if (type & HCI_SMP_LTK)
1800                 mgmt_new_ltk(hdev, key, 1);
1801
1802         return 0;
1803 }
1804
1805 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1806 {
1807         struct link_key *key;
1808
1809         key = hci_find_link_key(hdev, bdaddr);
1810         if (!key)
1811                 return -ENOENT;
1812
1813         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1814
1815         list_del(&key->list);
1816         kfree(key);
1817
1818         return 0;
1819 }
1820
1821 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1822 {
1823         struct smp_ltk *k, *tmp;
1824
1825         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1826                 if (bacmp(bdaddr, &k->bdaddr))
1827                         continue;
1828
1829                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1830
1831                 list_del(&k->list);
1832                 kfree(k);
1833         }
1834
1835         return 0;
1836 }
1837
1838 /* HCI command timer function */
1839 static void hci_cmd_timeout(unsigned long arg)
1840 {
1841         struct hci_dev *hdev = (void *) arg;
1842
1843         if (hdev->sent_cmd) {
1844                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1845                 u16 opcode = __le16_to_cpu(sent->opcode);
1846
1847                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1848         } else {
1849                 BT_ERR("%s command tx timeout", hdev->name);
1850         }
1851
1852         atomic_set(&hdev->cmd_cnt, 1);
1853         queue_work(hdev->workqueue, &hdev->cmd_work);
1854 }
1855
1856 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1857                                           bdaddr_t *bdaddr)
1858 {
1859         struct oob_data *data;
1860
1861         list_for_each_entry(data, &hdev->remote_oob_data, list)
1862                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1863                         return data;
1864
1865         return NULL;
1866 }
1867
1868 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1869 {
1870         struct oob_data *data;
1871
1872         data = hci_find_remote_oob_data(hdev, bdaddr);
1873         if (!data)
1874                 return -ENOENT;
1875
1876         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1877
1878         list_del(&data->list);
1879         kfree(data);
1880
1881         return 0;
1882 }
1883
1884 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1885 {
1886         struct oob_data *data, *n;
1887
1888         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1889                 list_del(&data->list);
1890                 kfree(data);
1891         }
1892
1893         return 0;
1894 }
1895
1896 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1897                             u8 *randomizer)
1898 {
1899         struct oob_data *data;
1900
1901         data = hci_find_remote_oob_data(hdev, bdaddr);
1902
1903         if (!data) {
1904                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1905                 if (!data)
1906                         return -ENOMEM;
1907
1908                 bacpy(&data->bdaddr, bdaddr);
1909                 list_add(&data->list, &hdev->remote_oob_data);
1910         }
1911
1912         memcpy(data->hash, hash, sizeof(data->hash));
1913         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1914
1915         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1916
1917         return 0;
1918 }
1919
1920 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1921 {
1922         struct bdaddr_list *b;
1923
1924         list_for_each_entry(b, &hdev->blacklist, list)
1925                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1926                         return b;
1927
1928         return NULL;
1929 }
1930
1931 int hci_blacklist_clear(struct hci_dev *hdev)
1932 {
1933         struct list_head *p, *n;
1934
1935         list_for_each_safe(p, n, &hdev->blacklist) {
1936                 struct bdaddr_list *b;
1937
1938                 b = list_entry(p, struct bdaddr_list, list);
1939
1940                 list_del(p);
1941                 kfree(b);
1942         }
1943
1944         return 0;
1945 }
1946
1947 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1948 {
1949         struct bdaddr_list *entry;
1950
1951         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1952                 return -EBADF;
1953
1954         if (hci_blacklist_lookup(hdev, bdaddr))
1955                 return -EEXIST;
1956
1957         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1958         if (!entry)
1959                 return -ENOMEM;
1960
1961         bacpy(&entry->bdaddr, bdaddr);
1962
1963         list_add(&entry->list, &hdev->blacklist);
1964
1965         return mgmt_device_blocked(hdev, bdaddr, type);
1966 }
1967
1968 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1969 {
1970         struct bdaddr_list *entry;
1971
1972         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1973                 return hci_blacklist_clear(hdev);
1974
1975         entry = hci_blacklist_lookup(hdev, bdaddr);
1976         if (!entry)
1977                 return -ENOENT;
1978
1979         list_del(&entry->list);
1980         kfree(entry);
1981
1982         return mgmt_device_unblocked(hdev, bdaddr, type);
1983 }
1984
1985 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1986 {
1987         struct le_scan_params *param = (struct le_scan_params *) opt;
1988         struct hci_cp_le_set_scan_param cp;
1989
1990         memset(&cp, 0, sizeof(cp));
1991         cp.type = param->type;
1992         cp.interval = cpu_to_le16(param->interval);
1993         cp.window = cpu_to_le16(param->window);
1994
1995         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1996 }
1997
1998 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1999 {
2000         struct hci_cp_le_set_scan_enable cp;
2001
2002         memset(&cp, 0, sizeof(cp));
2003         cp.enable = LE_SCAN_ENABLE;
2004         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2005
2006         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2007 }
2008
2009 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2010                           u16 window, int timeout)
2011 {
2012         long timeo = msecs_to_jiffies(3000);
2013         struct le_scan_params param;
2014         int err;
2015
2016         BT_DBG("%s", hdev->name);
2017
2018         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2019                 return -EINPROGRESS;
2020
2021         param.type = type;
2022         param.interval = interval;
2023         param.window = window;
2024
2025         hci_req_lock(hdev);
2026
2027         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2028                              timeo);
2029         if (!err)
2030                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2031
2032         hci_req_unlock(hdev);
2033
2034         if (err < 0)
2035                 return err;
2036
2037         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2038                            timeout);
2039
2040         return 0;
2041 }
2042
2043 int hci_cancel_le_scan(struct hci_dev *hdev)
2044 {
2045         BT_DBG("%s", hdev->name);
2046
2047         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2048                 return -EALREADY;
2049
2050         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2051                 struct hci_cp_le_set_scan_enable cp;
2052
2053                 /* Send HCI command to disable LE Scan */
2054                 memset(&cp, 0, sizeof(cp));
2055                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2056         }
2057
2058         return 0;
2059 }
2060
2061 static void le_scan_disable_work(struct work_struct *work)
2062 {
2063         struct hci_dev *hdev = container_of(work, struct hci_dev,
2064                                             le_scan_disable.work);
2065         struct hci_cp_le_set_scan_enable cp;
2066
2067         BT_DBG("%s", hdev->name);
2068
2069         memset(&cp, 0, sizeof(cp));
2070
2071         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2072 }
2073
2074 static void le_scan_work(struct work_struct *work)
2075 {
2076         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2077         struct le_scan_params *param = &hdev->le_scan_params;
2078
2079         BT_DBG("%s", hdev->name);
2080
2081         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2082                        param->timeout);
2083 }
2084
2085 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2086                 int timeout)
2087 {
2088         struct le_scan_params *param = &hdev->le_scan_params;
2089
2090         BT_DBG("%s", hdev->name);
2091
2092         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2093                 return -ENOTSUPP;
2094
2095         if (work_busy(&hdev->le_scan))
2096                 return -EINPROGRESS;
2097
2098         param->type = type;
2099         param->interval = interval;
2100         param->window = window;
2101         param->timeout = timeout;
2102
2103         queue_work(system_long_wq, &hdev->le_scan);
2104
2105         return 0;
2106 }
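/* Editorial sketch: starting an active scan for roughly ten seconds.
 * The interval and window values (in units of 0.625 ms) are
 * illustrative only; the timeout is handed on to queue_delayed_work()
 * in jiffies.
 */
static inline int example_start_le_scan(struct hci_dev *hdev)
{
	return hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0060, 0x0030,
			   msecs_to_jiffies(10000));
}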
2107
2108 /* Alloc HCI device */
2109 struct hci_dev *hci_alloc_dev(void)
2110 {
2111         struct hci_dev *hdev;
2112
2113         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2114         if (!hdev)
2115                 return NULL;
2116
2117         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2118         hdev->esco_type = (ESCO_HV1);
2119         hdev->link_mode = (HCI_LM_ACCEPT);
2120         hdev->io_capability = 0x03; /* No Input No Output */
2121         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2122         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2123
2124         hdev->sniff_max_interval = 800;
2125         hdev->sniff_min_interval = 80;
2126
2127         mutex_init(&hdev->lock);
2128         mutex_init(&hdev->req_lock);
2129
2130         INIT_LIST_HEAD(&hdev->mgmt_pending);
2131         INIT_LIST_HEAD(&hdev->blacklist);
2132         INIT_LIST_HEAD(&hdev->uuids);
2133         INIT_LIST_HEAD(&hdev->link_keys);
2134         INIT_LIST_HEAD(&hdev->long_term_keys);
2135         INIT_LIST_HEAD(&hdev->remote_oob_data);
2136         INIT_LIST_HEAD(&hdev->conn_hash.list);
2137
2138         INIT_WORK(&hdev->rx_work, hci_rx_work);
2139         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2140         INIT_WORK(&hdev->tx_work, hci_tx_work);
2141         INIT_WORK(&hdev->power_on, hci_power_on);
2142         INIT_WORK(&hdev->le_scan, le_scan_work);
2143
2144         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2145         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2146         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2147
2148         skb_queue_head_init(&hdev->rx_q);
2149         skb_queue_head_init(&hdev->cmd_q);
2150         skb_queue_head_init(&hdev->raw_q);
2151
2152         init_waitqueue_head(&hdev->req_wait_q);
2153
2154         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2155
2156         hci_init_sysfs(hdev);
2157         discovery_init(hdev);
2158
2159         return hdev;
2160 }
2161 EXPORT_SYMBOL(hci_alloc_dev);
2162
2163 /* Free HCI device */
2164 void hci_free_dev(struct hci_dev *hdev)
2165 {
2166         /* will be freed via the device release callback */
2167         put_device(&hdev->dev);
2168 }
2169 EXPORT_SYMBOL(hci_free_dev);
2170
2171 /* Register HCI device */
2172 int hci_register_dev(struct hci_dev *hdev)
2173 {
2174         int id, error;
2175
2176         if (!hdev->open || !hdev->close)
2177                 return -EINVAL;
2178
2179         /* Do not allow HCI_AMP devices to register at index 0,
2180          * so the index can be used as the AMP controller ID.
2181          */
2182         switch (hdev->dev_type) {
2183         case HCI_BREDR:
2184                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2185                 break;
2186         case HCI_AMP:
2187                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2188                 break;
2189         default:
2190                 return -EINVAL;
2191         }
2192
2193         if (id < 0)
2194                 return id;
2195
2196         sprintf(hdev->name, "hci%d", id);
2197         hdev->id = id;
2198
2199         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2200
2201         write_lock(&hci_dev_list_lock);
2202         list_add(&hdev->list, &hci_dev_list);
2203         write_unlock(&hci_dev_list_lock);
2204
2205         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2206                                           WQ_MEM_RECLAIM, 1);
2207         if (!hdev->workqueue) {
2208                 error = -ENOMEM;
2209                 goto err;
2210         }
2211
2212         hdev->req_workqueue = alloc_workqueue(hdev->name,
2213                                               WQ_HIGHPRI | WQ_UNBOUND |
2214                                               WQ_MEM_RECLAIM, 1);
2215         if (!hdev->req_workqueue) {
2216                 destroy_workqueue(hdev->workqueue);
2217                 error = -ENOMEM;
2218                 goto err;
2219         }
2220
2221         error = hci_add_sysfs(hdev);
2222         if (error < 0)
2223                 goto err_wqueue;
2224
2225         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2226                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2227                                     hdev);
2228         if (hdev->rfkill) {
2229                 if (rfkill_register(hdev->rfkill) < 0) {
2230                         rfkill_destroy(hdev->rfkill);
2231                         hdev->rfkill = NULL;
2232                 }
2233         }
2234
2235         set_bit(HCI_SETUP, &hdev->dev_flags);
2236
2237         if (hdev->dev_type != HCI_AMP)
2238                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2239
2240         hci_notify(hdev, HCI_DEV_REG);
2241         hci_dev_hold(hdev);
2242
2243         queue_work(hdev->req_workqueue, &hdev->power_on);
2244
2245         return id;
2246
2247 err_wqueue:
2248         destroy_workqueue(hdev->workqueue);
2249         destroy_workqueue(hdev->req_workqueue);
2250 err:
2251         ida_simple_remove(&hci_index_ida, hdev->id);
2252         write_lock(&hci_dev_list_lock);
2253         list_del(&hdev->list);
2254         write_unlock(&hci_dev_list_lock);
2255
2256         return error;
2257 }
2258 EXPORT_SYMBOL(hci_register_dev);
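/* Editorial sketch: the usual driver-side registration sequence, as
 * seen in transport drivers such as btusb.  struct my_data and the
 * my_open/my_close/my_send callbacks are hypothetical placeholders.
 */
static inline int example_register(struct my_data *data)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = my_open;
	hdev->close = my_close;
	hdev->send = my_send;
	hci_set_drvdata(hdev, data);

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}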
2259
2260 /* Unregister HCI device */
2261 void hci_unregister_dev(struct hci_dev *hdev)
2262 {
2263         int i, id;
2264
2265         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2266
2267         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2268
2269         id = hdev->id;
2270
2271         write_lock(&hci_dev_list_lock);
2272         list_del(&hdev->list);
2273         write_unlock(&hci_dev_list_lock);
2274
2275         hci_dev_do_close(hdev);
2276
2277         for (i = 0; i < NUM_REASSEMBLY; i++)
2278                 kfree_skb(hdev->reassembly[i]);
2279
2280         cancel_work_sync(&hdev->power_on);
2281
2282         if (!test_bit(HCI_INIT, &hdev->flags) &&
2283             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2284                 hci_dev_lock(hdev);
2285                 mgmt_index_removed(hdev);
2286                 hci_dev_unlock(hdev);
2287         }
2288
2289         /* mgmt_index_removed should take care of emptying the
2290          * pending list */
2291         BUG_ON(!list_empty(&hdev->mgmt_pending));
2292
2293         hci_notify(hdev, HCI_DEV_UNREG);
2294
2295         if (hdev->rfkill) {
2296                 rfkill_unregister(hdev->rfkill);
2297                 rfkill_destroy(hdev->rfkill);
2298         }
2299
2300         hci_del_sysfs(hdev);
2301
2302         destroy_workqueue(hdev->workqueue);
2303         destroy_workqueue(hdev->req_workqueue);
2304
2305         hci_dev_lock(hdev);
2306         hci_blacklist_clear(hdev);
2307         hci_uuids_clear(hdev);
2308         hci_link_keys_clear(hdev);
2309         hci_smp_ltks_clear(hdev);
2310         hci_remote_oob_data_clear(hdev);
2311         hci_dev_unlock(hdev);
2312
2313         hci_dev_put(hdev);
2314
2315         ida_simple_remove(&hci_index_ida, id);
2316 }
2317 EXPORT_SYMBOL(hci_unregister_dev);
2318
2319 /* Suspend HCI device */
2320 int hci_suspend_dev(struct hci_dev *hdev)
2321 {
2322         hci_notify(hdev, HCI_DEV_SUSPEND);
2323         return 0;
2324 }
2325 EXPORT_SYMBOL(hci_suspend_dev);
2326
2327 /* Resume HCI device */
2328 int hci_resume_dev(struct hci_dev *hdev)
2329 {
2330         hci_notify(hdev, HCI_DEV_RESUME);
2331         return 0;
2332 }
2333 EXPORT_SYMBOL(hci_resume_dev);
2334
2335 /* Receive frame from HCI drivers */
2336 int hci_recv_frame(struct sk_buff *skb)
2337 {
2338         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2339         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2340                       !test_bit(HCI_INIT, &hdev->flags))) {
2341                 kfree_skb(skb);
2342                 return -ENXIO;
2343         }
2344
2345         /* Incoming skb */
2346         bt_cb(skb)->incoming = 1;
2347
2348         /* Time stamp */
2349         __net_timestamp(skb);
2350
2351         skb_queue_tail(&hdev->rx_q, skb);
2352         queue_work(hdev->workqueue, &hdev->rx_work);
2353
2354         return 0;
2355 }
2356 EXPORT_SYMBOL(hci_recv_frame);
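/* Editorial sketch: how a driver hands a complete frame to the core in
 * this API version, where skb->dev carries the hci_dev pointer.  The
 * buf/len arguments are hypothetical driver data.
 */
static inline int example_deliver_frame(struct hci_dev *hdev, u8 pkt_type,
					const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = pkt_type;

	return hci_recv_frame(skb);
}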
2357
2358 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2359                           int count, __u8 index)
2360 {
2361         int len = 0;
2362         int hlen = 0;
2363         int remain = count;
2364         struct sk_buff *skb;
2365         struct bt_skb_cb *scb;
2366
2367         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2368             index >= NUM_REASSEMBLY)
2369                 return -EILSEQ;
2370
2371         skb = hdev->reassembly[index];
2372
2373         if (!skb) {
2374                 switch (type) {
2375                 case HCI_ACLDATA_PKT:
2376                         len = HCI_MAX_FRAME_SIZE;
2377                         hlen = HCI_ACL_HDR_SIZE;
2378                         break;
2379                 case HCI_EVENT_PKT:
2380                         len = HCI_MAX_EVENT_SIZE;
2381                         hlen = HCI_EVENT_HDR_SIZE;
2382                         break;
2383                 case HCI_SCODATA_PKT:
2384                         len = HCI_MAX_SCO_SIZE;
2385                         hlen = HCI_SCO_HDR_SIZE;
2386                         break;
2387                 }
2388
2389                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2390                 if (!skb)
2391                         return -ENOMEM;
2392
2393                 scb = (void *) skb->cb;
2394                 scb->expect = hlen;
2395                 scb->pkt_type = type;
2396
2397                 skb->dev = (void *) hdev;
2398                 hdev->reassembly[index] = skb;
2399         }
2400
2401         while (count) {
2402                 scb = (void *) skb->cb;
2403                 len = min_t(uint, scb->expect, count);
2404
2405                 memcpy(skb_put(skb, len), data, len);
2406
2407                 count -= len;
2408                 data += len;
2409                 scb->expect -= len;
2410                 remain = count;
2411
2412                 switch (type) {
2413                 case HCI_EVENT_PKT:
2414                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2415                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2416                                 scb->expect = h->plen;
2417
2418                                 if (skb_tailroom(skb) < scb->expect) {
2419                                         kfree_skb(skb);
2420                                         hdev->reassembly[index] = NULL;
2421                                         return -ENOMEM;
2422                                 }
2423                         }
2424                         break;
2425
2426                 case HCI_ACLDATA_PKT:
2427                 if (skb->len == HCI_ACL_HDR_SIZE) {
2428                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2429                                 scb->expect = __le16_to_cpu(h->dlen);
2430
2431                                 if (skb_tailroom(skb) < scb->expect) {
2432                                         kfree_skb(skb);
2433                                         hdev->reassembly[index] = NULL;
2434                                         return -ENOMEM;
2435                                 }
2436                         }
2437                         break;
2438
2439                 case HCI_SCODATA_PKT:
2440                         if (skb->len == HCI_SCO_HDR_SIZE) {
2441                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2442                                 scb->expect = h->dlen;
2443
2444                                 if (skb_tailroom(skb) < scb->expect) {
2445                                         kfree_skb(skb);
2446                                         hdev->reassembly[index] = NULL;
2447                                         return -ENOMEM;
2448                                 }
2449                         }
2450                         break;
2451                 }
2452
2453                 if (scb->expect == 0) {
2454                         /* Complete frame */
2455
2456                         bt_cb(skb)->pkt_type = type;
2457                         hci_recv_frame(skb);
2458
2459                         hdev->reassembly[index] = NULL;
2460                         return remain;
2461                 }
2462         }
2463
2464         return remain;
2465 }
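/* Editorial note: the return value is the number of input bytes that
 * were not consumed for the current packet; callers loop until it
 * reaches zero, as hci_recv_fragment() below does.
 */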
2466
2467 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2468 {
2469         int rem = 0;
2470
2471         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2472                 return -EILSEQ;
2473
2474         while (count) {
2475                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2476                 if (rem < 0)
2477                         return rem;
2478
2479                 data += (count - rem);
2480                 count = rem;
2481         }
2482
2483         return rem;
2484 }
2485 EXPORT_SYMBOL(hci_recv_fragment);
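/* Editorial sketch: a transport driver that receives pre-typed packets
 * in arbitrary chunks can forward each chunk as it arrives; per-type
 * reassembly state lives in hdev->reassembly[].  rx_buf and rx_len are
 * hypothetical driver variables.
 */
static inline int example_rx_chunk(struct hci_dev *hdev, void *rx_buf,
				   int rx_len)
{
	return hci_recv_fragment(hdev, HCI_EVENT_PKT, rx_buf, rx_len);
}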
2486
2487 #define STREAM_REASSEMBLY 0
2488
2489 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2490 {
2491         int type;
2492         int rem = 0;
2493
2494         while (count) {
2495                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2496
2497                 if (!skb) {
2498                         struct { char type; } *pkt;
2499
2500                         /* Start of the frame */
2501                         pkt = data;
2502                         type = pkt->type;
2503
2504                         data++;
2505                         count--;
2506                 } else
2507                         type = bt_cb(skb)->pkt_type;
2508
2509                 rem = hci_reassembly(hdev, type, data, count,
2510                                      STREAM_REASSEMBLY);
2511                 if (rem < 0)
2512                         return rem;
2513
2514                 data += (count - rem);
2515                 count = rem;
2516         }
2517
2518         return rem;
2519 }
2520 EXPORT_SYMBOL(hci_recv_stream_fragment);
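/* Editorial note: the stream variant is meant for H4-style byte
 * streams (e.g. UART transports) where each packet is prefixed by its
 * packet type indicator, so the driver can pass raw transport data
 * through without parsing it first.
 */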
2521
2522 /* ---- Interface to upper protocols ---- */
2523
2524 int hci_register_cb(struct hci_cb *cb)
2525 {
2526         BT_DBG("%p name %s", cb, cb->name);
2527
2528         write_lock(&hci_cb_list_lock);
2529         list_add(&cb->list, &hci_cb_list);
2530         write_unlock(&hci_cb_list_lock);
2531
2532         return 0;
2533 }
2534 EXPORT_SYMBOL(hci_register_cb);
2535
2536 int hci_unregister_cb(struct hci_cb *cb)
2537 {
2538         BT_DBG("%p name %s", cb, cb->name);
2539
2540         write_lock(&hci_cb_list_lock);
2541         list_del(&cb->list);
2542         write_unlock(&hci_cb_list_lock);
2543
2544         return 0;
2545 }
2546 EXPORT_SYMBOL(hci_unregister_cb);
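/* Editorial sketch: an upper-layer protocol hooks into HCI by
 * registering a struct hci_cb at init time; the notification
 * callbacks are omitted here for brevity.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

/* ... and during module init:
 *
 *	hci_register_cb(&example_cb);
 */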
2547
2548 static int hci_send_frame(struct sk_buff *skb)
2549 {
2550         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2551
2552         if (!hdev) {
2553                 kfree_skb(skb);
2554                 return -ENODEV;
2555         }
2556
2557         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2558
2559         /* Time stamp */
2560         __net_timestamp(skb);
2561
2562         /* Send copy to monitor */
2563         hci_send_to_monitor(hdev, skb);
2564
2565         if (atomic_read(&hdev->promisc)) {
2566                 /* Send copy to the sockets */
2567                 hci_send_to_sock(hdev, skb);
2568         }
2569
2570         /* Get rid of skb owner, prior to sending to the driver. */
2571         skb_orphan(skb);
2572
2573         return hdev->send(skb);
2574 }
2575
2576 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2577 {
2578         skb_queue_head_init(&req->cmd_q);
2579         req->hdev = hdev;
2580         req->err = 0;
2581 }
2582
2583 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2584 {
2585         struct hci_dev *hdev = req->hdev;
2586         struct sk_buff *skb;
2587         unsigned long flags;
2588
2589         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2590
2591         /* If an error occurred during request building, remove all HCI
2592          * commands queued on the HCI request queue.
2593          */
2594         if (req->err) {
2595                 skb_queue_purge(&req->cmd_q);
2596                 return req->err;
2597         }
2598
2599         /* Do not allow empty requests */
2600         if (skb_queue_empty(&req->cmd_q))
2601                 return -ENODATA;
2602
2603         skb = skb_peek_tail(&req->cmd_q);
2604         bt_cb(skb)->req.complete = complete;
2605
2606         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2607         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2608         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2609
2610         queue_work(hdev->workqueue, &hdev->cmd_work);
2611
2612         return 0;
2613 }
2614
2615 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2616                                        u32 plen, const void *param)
2617 {
2618         int len = HCI_COMMAND_HDR_SIZE + plen;
2619         struct hci_command_hdr *hdr;
2620         struct sk_buff *skb;
2621
2622         skb = bt_skb_alloc(len, GFP_ATOMIC);
2623         if (!skb)
2624                 return NULL;
2625
2626         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2627         hdr->opcode = cpu_to_le16(opcode);
2628         hdr->plen   = plen;
2629
2630         if (plen)
2631                 memcpy(skb_put(skb, plen), param, plen);
2632
2633         BT_DBG("skb len %d", skb->len);
2634
2635         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2636         skb->dev = (void *) hdev;
2637
2638         return skb;
2639 }
2640
2641 /* Send HCI command */
2642 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2643                  const void *param)
2644 {
2645         struct sk_buff *skb;
2646
2647         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2648
2649         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2650         if (!skb) {
2651                 BT_ERR("%s no memory for command", hdev->name);
2652                 return -ENOMEM;
2653         }
2654
2655         /* Stand-alone HCI commands must be flagged as
2656          * single-command requests.
2657          */
2658         bt_cb(skb)->req.start = true;
2659
2660         skb_queue_tail(&hdev->cmd_q, skb);
2661         queue_work(hdev->workqueue, &hdev->cmd_work);
2662
2663         return 0;
2664 }
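/* Editorial sketch: a parameterless fire-and-forget command.
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */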
2665
2666 /* Queue a command to an asynchronous HCI request */
2667 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2668                     const void *param, u8 event)
2669 {
2670         struct hci_dev *hdev = req->hdev;
2671         struct sk_buff *skb;
2672
2673         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2674
2675         /* If an error occurred during request building, there is no point in
2676          * queueing the HCI command. We can simply return.
2677          */
2678         if (req->err)
2679                 return;
2680
2681         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2682         if (!skb) {
2683                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2684                        hdev->name, opcode);
2685                 req->err = -ENOMEM;
2686                 return;
2687         }
2688
2689         if (skb_queue_empty(&req->cmd_q))
2690                 bt_cb(skb)->req.start = true;
2691
2692         bt_cb(skb)->req.event = event;
2693
2694         skb_queue_tail(&req->cmd_q, skb);
2695 }
2696
2697 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2698                  const void *param)
2699 {
2700         hci_req_add_ev(req, opcode, plen, param, 0);
2701 }
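/* Editorial sketch: building and submitting an asynchronous request
 * with the helpers above.  Commands queued on the request are spliced
 * onto hdev->cmd_q atomically by hci_req_run(); example_complete is a
 * hypothetical completion callback.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static inline int example_write_scan(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, example_complete);
}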
2702
2703 /* Get data from the previously sent command */
2704 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2705 {
2706         struct hci_command_hdr *hdr;
2707
2708         if (!hdev->sent_cmd)
2709                 return NULL;
2710
2711         hdr = (void *) hdev->sent_cmd->data;
2712
2713         if (hdr->opcode != cpu_to_le16(opcode))
2714                 return NULL;
2715
2716         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2717
2718         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2719 }
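/* Editorial sketch: an event handler recovering the parameters of the
 * command it completes.
 *
 *	struct hci_cp_le_set_scan_param *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
 *	if (cp)
 *		BT_DBG("scan type 0x%2.2x", cp->type);
 */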
2720
2721 /* Send ACL data */
2722 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2723 {
2724         struct hci_acl_hdr *hdr;
2725         int len = skb->len;
2726
2727         skb_push(skb, HCI_ACL_HDR_SIZE);
2728         skb_reset_transport_header(skb);
2729         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2730         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2731         hdr->dlen   = cpu_to_le16(len);
2732 }
2733
2734 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2735                           struct sk_buff *skb, __u16 flags)
2736 {
2737         struct hci_conn *conn = chan->conn;
2738         struct hci_dev *hdev = conn->hdev;
2739         struct sk_buff *list;
2740
2741         skb->len = skb_headlen(skb);
2742         skb->data_len = 0;
2743
2744         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2745
2746         switch (hdev->dev_type) {
2747         case HCI_BREDR:
2748                 hci_add_acl_hdr(skb, conn->handle, flags);
2749                 break;
2750         case HCI_AMP:
2751                 hci_add_acl_hdr(skb, chan->handle, flags);
2752                 break;
2753         default:
2754                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2755                 return;
2756         }
2757
2758         list = skb_shinfo(skb)->frag_list;
2759         if (!list) {
2760                 /* Non-fragmented */
2761                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2762
2763                 skb_queue_tail(queue, skb);
2764         } else {
2765                 /* Fragmented */
2766                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2767
2768                 skb_shinfo(skb)->frag_list = NULL;
2769
2770                 /* Queue all fragments atomically */
2771                 spin_lock(&queue->lock);
2772
2773                 __skb_queue_tail(queue, skb);
2774
2775                 flags &= ~ACL_START;
2776                 flags |= ACL_CONT;
2777                 do {
2778                         skb = list; list = list->next;
2779
2780                         skb->dev = (void *) hdev;
2781                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2782                         hci_add_acl_hdr(skb, conn->handle, flags);
2783
2784                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2785
2786                         __skb_queue_tail(queue, skb);
2787                 } while (list);
2788
2789                 spin_unlock(&queue->lock);
2790         }
2791 }
2792
2793 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2794 {
2795         struct hci_dev *hdev = chan->conn->hdev;
2796
2797         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2798
2799         skb->dev = (void *) hdev;
2800
2801         hci_queue_acl(chan, &chan->data_q, skb, flags);
2802
2803         queue_work(hdev->workqueue, &hdev->tx_work);
2804 }
2805
2806 /* Send SCO data */
2807 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2808 {
2809         struct hci_dev *hdev = conn->hdev;
2810         struct hci_sco_hdr hdr;
2811
2812         BT_DBG("%s len %d", hdev->name, skb->len);
2813
2814         hdr.handle = cpu_to_le16(conn->handle);
2815         hdr.dlen   = skb->len;
2816
2817         skb_push(skb, HCI_SCO_HDR_SIZE);
2818         skb_reset_transport_header(skb);
2819         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2820
2821         skb->dev = (void *) hdev;
2822         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2823
2824         skb_queue_tail(&conn->data_q, skb);
2825         queue_work(hdev->workqueue, &hdev->tx_work);
2826 }
2827
2828 /* ---- HCI TX task (outgoing data) ---- */
2829
2830 /* HCI Connection scheduler */
2831 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2832                                      int *quote)
2833 {
2834         struct hci_conn_hash *h = &hdev->conn_hash;
2835         struct hci_conn *conn = NULL, *c;
2836         unsigned int num = 0, min = ~0;
2837
2838         /* We don't have to lock the device here. Connections are always
2839          * added and removed with TX task disabled. */
2840
2841         rcu_read_lock();
2842
2843         list_for_each_entry_rcu(c, &h->list, list) {
2844                 if (c->type != type || skb_queue_empty(&c->data_q))
2845                         continue;
2846
2847                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2848                         continue;
2849
2850                 num++;
2851
2852                 if (c->sent < min) {
2853                         min  = c->sent;
2854                         conn = c;
2855                 }
2856
2857                 if (hci_conn_num(hdev, type) == num)
2858                         break;
2859         }
2860
2861         rcu_read_unlock();
2862
2863         if (conn) {
2864                 int cnt, q;
2865
2866                 switch (conn->type) {
2867                 case ACL_LINK:
2868                         cnt = hdev->acl_cnt;
2869                         break;
2870                 case SCO_LINK:
2871                 case ESCO_LINK:
2872                         cnt = hdev->sco_cnt;
2873                         break;
2874                 case LE_LINK:
2875                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2876                         break;
2877                 default:
2878                         cnt = 0;
2879                         BT_ERR("Unknown link type");
2880                 }
2881
2882                 q = cnt / num;
2883                 *quote = q ? q : 1;
2884         } else
2885                 *quote = 0;
2886
2887         BT_DBG("conn %p quote %d", conn, *quote);
2888         return conn;
2889 }
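/* Editorial note, worked example: with three SCO connections holding
 * queued data and sco_cnt == 6, num ends up as 3 and the quote is
 * 6 / 3 = 2 packets, granted to the least-busy (lowest ->sent)
 * connection first.
 */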
2890
2891 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2892 {
2893         struct hci_conn_hash *h = &hdev->conn_hash;
2894         struct hci_conn *c;
2895
2896         BT_ERR("%s link tx timeout", hdev->name);
2897
2898         rcu_read_lock();
2899
2900         /* Kill stalled connections */
2901         list_for_each_entry_rcu(c, &h->list, list) {
2902                 if (c->type == type && c->sent) {
2903                         BT_ERR("%s killing stalled connection %pMR",
2904                                hdev->name, &c->dst);
2905                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2906                 }
2907         }
2908
2909         rcu_read_unlock();
2910 }
2911
2912 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2913                                       int *quote)
2914 {
2915         struct hci_conn_hash *h = &hdev->conn_hash;
2916         struct hci_chan *chan = NULL;
2917         unsigned int num = 0, min = ~0, cur_prio = 0;
2918         struct hci_conn *conn;
2919         int cnt, q, conn_num = 0;
2920
2921         BT_DBG("%s", hdev->name);
2922
2923         rcu_read_lock();
2924
2925         list_for_each_entry_rcu(conn, &h->list, list) {
2926                 struct hci_chan *tmp;
2927
2928                 if (conn->type != type)
2929                         continue;
2930
2931                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2932                         continue;
2933
2934                 conn_num++;
2935
2936                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2937                         struct sk_buff *skb;
2938
2939                         if (skb_queue_empty(&tmp->data_q))
2940                                 continue;
2941
2942                         skb = skb_peek(&tmp->data_q);
2943                         if (skb->priority < cur_prio)
2944                                 continue;
2945
2946                         if (skb->priority > cur_prio) {
2947                                 num = 0;
2948                                 min = ~0;
2949                                 cur_prio = skb->priority;
2950                         }
2951
2952                         num++;
2953
2954                         if (conn->sent < min) {
2955                                 min  = conn->sent;
2956                                 chan = tmp;
2957                         }
2958                 }
2959
2960                 if (hci_conn_num(hdev, type) == conn_num)
2961                         break;
2962         }
2963
2964         rcu_read_unlock();
2965
2966         if (!chan)
2967                 return NULL;
2968
2969         switch (chan->conn->type) {
2970         case ACL_LINK:
2971                 cnt = hdev->acl_cnt;
2972                 break;
2973         case AMP_LINK:
2974                 cnt = hdev->block_cnt;
2975                 break;
2976         case SCO_LINK:
2977         case ESCO_LINK:
2978                 cnt = hdev->sco_cnt;
2979                 break;
2980         case LE_LINK:
2981                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2982                 break;
2983         default:
2984                 cnt = 0;
2985                 BT_ERR("Unknown link type");
2986         }
2987
2988         q = cnt / num;
2989         *quote = q ? q : 1;
2990         BT_DBG("chan %p quote %d", chan, *quote);
2991         return chan;
2992 }
2993
2994 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2995 {
2996         struct hci_conn_hash *h = &hdev->conn_hash;
2997         struct hci_conn *conn;
2998         int num = 0;
2999
3000         BT_DBG("%s", hdev->name);
3001
3002         rcu_read_lock();
3003
3004         list_for_each_entry_rcu(conn, &h->list, list) {
3005                 struct hci_chan *chan;
3006
3007                 if (conn->type != type)
3008                         continue;
3009
3010                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3011                         continue;
3012
3013                 num++;
3014
3015                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3016                         struct sk_buff *skb;
3017
3018                         if (chan->sent) {
3019                                 chan->sent = 0;
3020                                 continue;
3021                         }
3022
3023                         if (skb_queue_empty(&chan->data_q))
3024                                 continue;
3025
3026                         skb = skb_peek(&chan->data_q);
3027                         if (skb->priority >= HCI_PRIO_MAX - 1)
3028                                 continue;
3029
3030                         skb->priority = HCI_PRIO_MAX - 1;
3031
3032                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3033                                skb->priority);
3034                 }
3035
3036                 if (hci_conn_num(hdev, type) == num)
3037                         break;
3038         }
3039
3040         rcu_read_unlock();
3041
3042 }
3043
3044 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3045 {
3046         /* Calculate count of blocks used by this packet */
3047         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3048 }
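/* Editorial note, worked example: with hdev->block_len == 64 and a
 * 340-byte ACL payload (skb->len == 344 including the 4-byte ACL
 * header), DIV_ROUND_UP(340, 64) == 6 blocks are consumed.
 */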
3049
3050 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3051 {
3052         if (!test_bit(HCI_RAW, &hdev->flags)) {
3053                 /* ACL tx timeout must be longer than maximum
3054                  * link supervision timeout (40.9 seconds) */
3055                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3056                                        HCI_ACL_TX_TIMEOUT))
3057                         hci_link_tx_to(hdev, ACL_LINK);
3058         }
3059 }
3060
3061 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3062 {
3063         unsigned int cnt = hdev->acl_cnt;
3064         struct hci_chan *chan;
3065         struct sk_buff *skb;
3066         int quote;
3067
3068         __check_timeout(hdev, cnt);
3069
3070         while (hdev->acl_cnt &&
3071                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3072                 u32 priority = (skb_peek(&chan->data_q))->priority;
3073                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3074                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3075                                skb->len, skb->priority);
3076
3077                         /* Stop if priority has changed */
3078                         if (skb->priority < priority)
3079                                 break;
3080
3081                         skb = skb_dequeue(&chan->data_q);
3082
3083                         hci_conn_enter_active_mode(chan->conn,
3084                                                    bt_cb(skb)->force_active);
3085
3086                         hci_send_frame(skb);
3087                         hdev->acl_last_tx = jiffies;
3088
3089                         hdev->acl_cnt--;
3090                         chan->sent++;
3091                         chan->conn->sent++;
3092                 }
3093         }
3094
3095         if (cnt != hdev->acl_cnt)
3096                 hci_prio_recalculate(hdev, ACL_LINK);
3097 }
3098
3099 static void hci_sched_acl_blk(struct hci_dev *hdev)
3100 {
3101         unsigned int cnt = hdev->block_cnt;
3102         struct hci_chan *chan;
3103         struct sk_buff *skb;
3104         int quote;
3105         u8 type;
3106
3107         __check_timeout(hdev, cnt);
3108
3109         BT_DBG("%s", hdev->name);
3110
3111         if (hdev->dev_type == HCI_AMP)
3112                 type = AMP_LINK;
3113         else
3114                 type = ACL_LINK;
3115
3116         while (hdev->block_cnt > 0 &&
3117                (chan = hci_chan_sent(hdev, type, &quote))) {
3118                 u32 priority = (skb_peek(&chan->data_q))->priority;
3119                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3120                         int blocks;
3121
3122                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3123                                skb->len, skb->priority);
3124
3125                         /* Stop if priority has changed */
3126                         if (skb->priority < priority)
3127                                 break;
3128
3129                         skb = skb_dequeue(&chan->data_q);
3130
3131                         blocks = __get_blocks(hdev, skb);
3132                         if (blocks > hdev->block_cnt)
3133                                 return;
3134
3135                         hci_conn_enter_active_mode(chan->conn,
3136                                                    bt_cb(skb)->force_active);
3137
3138                         hci_send_frame(skb);
3139                         hdev->acl_last_tx = jiffies;
3140
3141                         hdev->block_cnt -= blocks;
3142                         quote -= blocks;
3143
3144                         chan->sent += blocks;
3145                         chan->conn->sent += blocks;
3146                 }
3147         }
3148
3149         if (cnt != hdev->block_cnt)
3150                 hci_prio_recalculate(hdev, type);
3151 }
3152
3153 static void hci_sched_acl(struct hci_dev *hdev)
3154 {
3155         BT_DBG("%s", hdev->name);
3156
3157         /* No ACL link over BR/EDR controller */
3158         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3159                 return;
3160
3161         /* No AMP link over AMP controller */
3162         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3163                 return;
3164
3165         switch (hdev->flow_ctl_mode) {
3166         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3167                 hci_sched_acl_pkt(hdev);
3168                 break;
3169
3170         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3171                 hci_sched_acl_blk(hdev);
3172                 break;
3173         }
3174 }
3175
3176 /* Schedule SCO */
3177 static void hci_sched_sco(struct hci_dev *hdev)
3178 {
3179         struct hci_conn *conn;
3180         struct sk_buff *skb;
3181         int quote;
3182
3183         BT_DBG("%s", hdev->name);
3184
3185         if (!hci_conn_num(hdev, SCO_LINK))
3186                 return;
3187
3188         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3189                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3190                         BT_DBG("skb %p len %d", skb, skb->len);
3191                         hci_send_frame(skb);
3192
3193                         conn->sent++;
3194                         if (conn->sent == ~0)
3195                                 conn->sent = 0;
3196                 }
3197         }
3198 }
3199
3200 static void hci_sched_esco(struct hci_dev *hdev)
3201 {
3202         struct hci_conn *conn;
3203         struct sk_buff *skb;
3204         int quote;
3205
3206         BT_DBG("%s", hdev->name);
3207
3208         if (!hci_conn_num(hdev, ESCO_LINK))
3209                 return;
3210
3211         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3212                                                      &quote))) {
3213                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3214                         BT_DBG("skb %p len %d", skb, skb->len);
3215                         hci_send_frame(skb);
3216
3217                         conn->sent++;
3218                         if (conn->sent == ~0)
3219                                 conn->sent = 0;
3220                 }
3221         }
3222 }
3223
3224 static void hci_sched_le(struct hci_dev *hdev)
3225 {
3226         struct hci_chan *chan;
3227         struct sk_buff *skb;
3228         int quote, cnt, tmp;
3229
3230         BT_DBG("%s", hdev->name);
3231
3232         if (!hci_conn_num(hdev, LE_LINK))
3233                 return;
3234
3235         if (!test_bit(HCI_RAW, &hdev->flags)) {
3236                 /* LE tx timeout must be longer than maximum
3237                  * link supervision timeout (40.9 seconds) */
3238                 if (!hdev->le_cnt && hdev->le_pkts &&
3239                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3240                         hci_link_tx_to(hdev, LE_LINK);
3241         }
3242
3243         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3244         tmp = cnt;
3245         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3246                 u32 priority = (skb_peek(&chan->data_q))->priority;
3247                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3248                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3249                                skb->len, skb->priority);
3250
3251                         /* Stop if priority has changed */
3252                         if (skb->priority < priority)
3253                                 break;
3254
3255                         skb = skb_dequeue(&chan->data_q);
3256
3257                         hci_send_frame(skb);
3258                         hdev->le_last_tx = jiffies;
3259
3260                         cnt--;
3261                         chan->sent++;
3262                         chan->conn->sent++;
3263                 }
3264         }
3265
3266         if (hdev->le_pkts)
3267                 hdev->le_cnt = cnt;
3268         else
3269                 hdev->acl_cnt = cnt;
3270
3271         if (cnt != tmp)
3272                 hci_prio_recalculate(hdev, LE_LINK);
3273 }
3274
3275 static void hci_tx_work(struct work_struct *work)
3276 {
3277         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3278         struct sk_buff *skb;
3279
3280         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3281                hdev->sco_cnt, hdev->le_cnt);
3282
3283         /* Schedule queues and send pending frames to the HCI driver */
3284
3285         hci_sched_acl(hdev);
3286
3287         hci_sched_sco(hdev);
3288
3289         hci_sched_esco(hdev);
3290
3291         hci_sched_le(hdev);
3292
3293         /* Send next queued raw (unknown type) packet */
3294         while ((skb = skb_dequeue(&hdev->raw_q)))
3295                 hci_send_frame(skb);
3296 }
3297
3298 /* ----- HCI RX task (incoming data processing) ----- */
3299
3300 /* ACL data packet */
3301 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3302 {
3303         struct hci_acl_hdr *hdr = (void *) skb->data;
3304         struct hci_conn *conn;
3305         __u16 handle, flags;
3306
3307         skb_pull(skb, HCI_ACL_HDR_SIZE);
3308
3309         handle = __le16_to_cpu(hdr->handle);
3310         flags  = hci_flags(handle);
3311         handle = hci_handle(handle);
3312
3313         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3314                handle, flags);
3315
3316         hdev->stat.acl_rx++;
3317
3318         hci_dev_lock(hdev);
3319         conn = hci_conn_hash_lookup_handle(hdev, handle);
3320         hci_dev_unlock(hdev);
3321
3322         if (conn) {
3323                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3324
3325                 /* Send to upper protocol */
3326                 l2cap_recv_acldata(conn, skb, flags);
3327                 return;
3328         } else {
3329                 BT_ERR("%s ACL packet for unknown connection handle %d",
3330                        hdev->name, handle);
3331         }
3332
3333         kfree_skb(skb);
3334 }
3335
3336 /* SCO data packet */
3337 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3338 {
3339         struct hci_sco_hdr *hdr = (void *) skb->data;
3340         struct hci_conn *conn;
3341         __u16 handle;
3342
3343         skb_pull(skb, HCI_SCO_HDR_SIZE);
3344
3345         handle = __le16_to_cpu(hdr->handle);
3346
3347         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3348
3349         hdev->stat.sco_rx++;
3350
3351         hci_dev_lock(hdev);
3352         conn = hci_conn_hash_lookup_handle(hdev, handle);
3353         hci_dev_unlock(hdev);
3354
3355         if (conn) {
3356                 /* Send to upper protocol */
3357                 sco_recv_scodata(conn, skb);
3358                 return;
3359         } else {
3360                 BT_ERR("%s SCO packet for unknown connection handle %d",
3361                        hdev->name, handle);
3362         }
3363
3364         kfree_skb(skb);
3365 }
3366
3367 static bool hci_req_is_complete(struct hci_dev *hdev)
3368 {
3369         struct sk_buff *skb;
3370
3371         skb = skb_peek(&hdev->cmd_q);
3372         if (!skb)
3373                 return true;
3374
3375         return bt_cb(skb)->req.start;
3376 }
3377
3378 static void hci_resend_last(struct hci_dev *hdev)
3379 {
3380         struct hci_command_hdr *sent;
3381         struct sk_buff *skb;
3382         u16 opcode;
3383
3384         if (!hdev->sent_cmd)
3385                 return;
3386
3387         sent = (void *) hdev->sent_cmd->data;
3388         opcode = __le16_to_cpu(sent->opcode);
3389         if (opcode == HCI_OP_RESET)
3390                 return;
3391
3392         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3393         if (!skb)
3394                 return;
3395
3396         skb_queue_head(&hdev->cmd_q, skb);
3397         queue_work(hdev->workqueue, &hdev->cmd_work);
3398 }

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to handle it specially.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the completion
         * callback is found in hdev->sent_cmd rather than in the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;
                if (req_complete)
                        goto call_complete;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}
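
/* A minimal sketch of a request completion callback of the
 * hci_req_complete_t type invoked above; the function name is
 * illustrative only:
 *
 *      static void example_req_complete(struct hci_dev *hdev, u8 status)
 *      {
 *              if (status)
 *                      BT_ERR("%s request failed (0x%2.2x)",
 *                             hdev->name, status);
 *      }
 */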

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}
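
/* Simplified sketch of the producer side of hci_rx_work(): a transport
 * driver labels each frame with its packet type and queues it on rx_q
 * before scheduling the work item. The helper name is hypothetical;
 * the fields and queues are the ones drained above:
 *
 *      static void example_deliver_frame(struct hci_dev *hdev,
 *                                        struct sk_buff *skb, __u8 type)
 *      {
 *              bt_cb(skb)->pkt_type = type;
 *              skb_queue_tail(&hdev->rx_q, skb);
 *              queue_work(hdev->workqueue, &hdev->rx_work);
 *      }
 */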

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}
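
/* Note on flow control: hdev->cmd_cnt mirrors the command credits the
 * controller advertises (Num_HCI_Command_Packets in Command Complete
 * and Command Status events). The worker above only transmits while a
 * credit remains, and cmd_timer catches controllers that stop
 * responding within HCI_CMD_TIMEOUT.
 */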

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return -EINPROGRESS;

        inquiry_cache_flush(hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));
        cp.length = length;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
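
/* Usage note: the length parameter is in units of 1.28 seconds, so a
 * hypothetical caller wanting a roughly ten-second discovery pass
 * could do:
 *
 *      err = hci_do_inquiry(hdev, 0x08);       // ~10.24 seconds
 *      if (err == -EINPROGRESS)
 *              BT_DBG("inquiry already in progress");
 */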

int hci_cancel_inquiry(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_INQUIRY, &hdev->flags))
                return -EALREADY;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fall back to the LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}
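
/* Example mapping (hypothetical caller): convert the address type
 * exposed to userspace into the LE address type used in HCI commands:
 *
 *      u8 addr_type = bdaddr_to_le(BDADDR_LE_PUBLIC);
 *      // addr_type == ADDR_LE_DEV_PUBLIC
 *
 * Any other input, including BDADDR_LE_RANDOM, yields
 * ADDR_LE_DEV_RANDOM via the default branch.
 */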