/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI event handling. */
26
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <net/sock.h>
39
40#include <linux/uaccess.h>
41#include <asm/unaligned.h>
42
43#include <net/bluetooth/bluetooth.h>
44#include <net/bluetooth/hci_core.h>
45
46/* Handle HCI Event packets */
47
/* Command Complete for HCI_OP_INQUIRY_CANCEL.
 *
 * The first byte of the event payload is the command status. On
 * failure the mgmt layer is told that stopping discovery failed; on
 * success the inquiry flag is cleared, the discovery state machine
 * moves to DISCOVERY_STOPPED, the waiting request is completed and any
 * connection attempts deferred during inquiry are re-examined.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	hci_conn_check_pending(hdev);
}
71
72static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
73{
74 __u8 status = *((__u8 *) skb->data);
75
76 BT_DBG("%s status 0x%x", hdev->name, status);
77
78 if (status)
79 return;
80
81 hci_conn_check_pending(hdev);
82}
83
/* Command Complete for HCI_OP_REMOTE_NAME_REQ_CANCEL: nothing to do
 * here beyond tracing. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
88
89static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
90{
91 struct hci_rp_role_discovery *rp = (void *) skb->data;
92 struct hci_conn *conn;
93
94 BT_DBG("%s status 0x%x", hdev->name, rp->status);
95
96 if (rp->status)
97 return;
98
99 hci_dev_lock(hdev);
100
101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
102 if (conn) {
103 if (rp->role)
104 conn->link_mode &= ~HCI_LM_MASTER;
105 else
106 conn->link_mode |= HCI_LM_MASTER;
107 }
108
109 hci_dev_unlock(hdev);
110}
111
112static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
113{
114 struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 struct hci_conn *conn;
116
117 BT_DBG("%s status 0x%x", hdev->name, rp->status);
118
119 if (rp->status)
120 return;
121
122 hci_dev_lock(hdev);
123
124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125 if (conn)
126 conn->link_policy = __le16_to_cpu(rp->policy);
127
128 hci_dev_unlock(hdev);
129}
130
/* Command Complete for HCI_OP_WRITE_LINK_POLICY.
 *
 * On success, update the cached per-connection link policy from the
 * parameters of the command we originally sent (the reply itself only
 * carries status and handle).
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* +2 skips the 2-byte connection handle at the start of
		 * the sent parameters; the policy field that follows may
		 * be unaligned, hence get_unaligned_le16(). NOTE(review):
		 * assumes hci_cp_write_link_policy is handle then policy
		 * -- confirm against the struct definition. */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
154
155static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156{
157 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158
159 BT_DBG("%s status 0x%x", hdev->name, rp->status);
160
161 if (rp->status)
162 return;
163
164 hdev->link_policy = __le16_to_cpu(rp->policy);
165}
166
167static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
168{
169 __u8 status = *((__u8 *) skb->data);
170 void *sent;
171
172 BT_DBG("%s status 0x%x", hdev->name, status);
173
174 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
175 if (!sent)
176 return;
177
178 if (!status)
179 hdev->link_policy = get_unaligned_le16(sent);
180
181 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
182}
183
/* Command Complete for HCI_OP_RESET: clear the in-progress reset flag,
 * wake the waiting request and drop all non-persistent device state. */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS));

	hdev->discovery.state = DISCOVERY_STOPPED;
}
199
/* Command Complete for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When the mgmt interface owns the device the result (success or
 * failure) is always forwarded there; otherwise the local name cache
 * is updated only on success, from the name we originally sent.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
}
222
/* Command Complete for HCI_OP_READ_LOCAL_NAME: cache the controller's
 * name, but only while the device is still in its HCI_SETUP phase. */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
235
236static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
237{
238 __u8 status = *((__u8 *) skb->data);
239 void *sent;
240
241 BT_DBG("%s status 0x%x", hdev->name, status);
242
243 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
244 if (!sent)
245 return;
246
247 if (!status) {
248 __u8 param = *((__u8 *) sent);
249
250 if (param == AUTH_ENABLED)
251 set_bit(HCI_AUTH, &hdev->flags);
252 else
253 clear_bit(HCI_AUTH, &hdev->flags);
254 }
255
256 if (test_bit(HCI_MGMT, &hdev->dev_flags))
257 mgmt_auth_enable_complete(hdev, status);
258
259 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
260}
261
262static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
263{
264 __u8 status = *((__u8 *) skb->data);
265 void *sent;
266
267 BT_DBG("%s status 0x%x", hdev->name, status);
268
269 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
270 if (!sent)
271 return;
272
273 if (!status) {
274 __u8 param = *((__u8 *) sent);
275
276 if (param)
277 set_bit(HCI_ENCRYPT, &hdev->flags);
278 else
279 clear_bit(HCI_ENCRYPT, &hdev->flags);
280 }
281
282 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
283}
284
/* Command Complete for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Mirrors the scan setting we sent into the HCI_ISCAN/HCI_PSCAN flags
 * and notifies the mgmt layer about discoverable/connectable
 * transitions. On failure mgmt is informed and the pending
 * discoverable timeout is dropped instead.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Recover the parameter we sent; without it we cannot tell which
	 * scan setting this status refers to. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status != 0) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* Remember the previous state so mgmt events are emitted only on
	 * actual transitions, not on every rewrite of the same value. */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* Arm the delayed work that turns discoverable mode back
		 * off when a finite timeout (in seconds) is configured. */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
									to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
333
334static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
335{
336 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
337
338 BT_DBG("%s status 0x%x", hdev->name, rp->status);
339
340 if (rp->status)
341 return;
342
343 memcpy(hdev->dev_class, rp->dev_class, 3);
344
345 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
346 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
347}
348
/* Command Complete for HCI_OP_WRITE_CLASS_OF_DEV: on success cache the
 * class we sent, and report the outcome to mgmt either way. */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
370
371static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
372{
373 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
374 __u16 setting;
375
376 BT_DBG("%s status 0x%x", hdev->name, rp->status);
377
378 if (rp->status)
379 return;
380
381 setting = __le16_to_cpu(rp->voice_setting);
382
383 if (hdev->voice_setting == setting)
384 return;
385
386 hdev->voice_setting = setting;
387
388 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
389
390 if (hdev->notify)
391 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
392}
393
394static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
395{
396 __u8 status = *((__u8 *) skb->data);
397 __u16 setting;
398 void *sent;
399
400 BT_DBG("%s status 0x%x", hdev->name, status);
401
402 if (status)
403 return;
404
405 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
406 if (!sent)
407 return;
408
409 setting = get_unaligned_le16(sent);
410
411 if (hdev->voice_setting == setting)
412 return;
413
414 hdev->voice_setting = setting;
415
416 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
417
418 if (hdev->notify)
419 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
420}
421
422static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
423{
424 __u8 status = *((__u8 *) skb->data);
425
426 BT_DBG("%s status 0x%x", hdev->name, status);
427
428 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
429}
430
/* Command Complete for HCI_OP_WRITE_SSP_MODE.
 *
 * With mgmt in control, completion (including failures) is reported
 * there; otherwise the HCI_SSP_ENABLED flag simply tracks the value we
 * wrote, on success only.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
	else if (!status) {
		if (*((u8 *) sent))
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
451
/* Pick the best inquiry result mode the controller can deliver:
 * 2 = inquiry result with extended inquiry response (EIR),
 * 1 = inquiry result with RSSI,
 * 0 = standard inquiry result.
 */
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (hdev->features[6] & LMP_EXT_INQ)
		return 2;

	if (hdev->features[3] & LMP_RSSI_INQ)
		return 1;

	/* NOTE(review): the manufacturer/hci_rev/lmp_subver triples
	 * below appear to whitelist specific firmware builds that
	 * handle RSSI inquiry despite not advertising LMP_RSSI_INQ --
	 * confirm IDs against the Bluetooth SIG company identifier
	 * list before touching them. */
	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}
479
480static void hci_setup_inquiry_mode(struct hci_dev *hdev)
481{
482 u8 mode;
483
484 mode = hci_get_inquiry_mode(hdev);
485
486 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
487}
488
489static void hci_setup_event_mask(struct hci_dev *hdev)
490{
491 /* The second byte is 0xff instead of 0x9f (two reserved bits
492 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
493 * command otherwise */
494 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
495
496 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
497 * any event mask for pre 1.2 devices */
498 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
499 return;
500
501 events[4] |= 0x01; /* Flow Specification Complete */
502 events[4] |= 0x02; /* Inquiry Result with RSSI */
503 events[4] |= 0x04; /* Read Remote Extended Features Complete */
504 events[5] |= 0x08; /* Synchronous Connection Complete */
505 events[5] |= 0x10; /* Synchronous Connection Changed */
506
507 if (hdev->features[3] & LMP_RSSI_INQ)
508 events[4] |= 0x04; /* Inquiry Result with RSSI */
509
510 if (hdev->features[5] & LMP_SNIFF_SUBR)
511 events[5] |= 0x20; /* Sniff Subrating */
512
513 if (hdev->features[5] & LMP_PAUSE_ENC)
514 events[5] |= 0x80; /* Encryption Key Refresh Complete */
515
516 if (hdev->features[6] & LMP_EXT_INQ)
517 events[5] |= 0x40; /* Extended Inquiry Result */
518
519 if (hdev->features[6] & LMP_NO_FLUSH)
520 events[7] |= 0x01; /* Enhanced Flush Complete */
521
522 if (hdev->features[7] & LMP_LSTO)
523 events[6] |= 0x80; /* Link Supervision Timeout Changed */
524
525 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
526 events[6] |= 0x01; /* IO Capability Request */
527 events[6] |= 0x02; /* IO Capability Response */
528 events[6] |= 0x04; /* User Confirmation Request */
529 events[6] |= 0x08; /* User Passkey Request */
530 events[6] |= 0x10; /* Remote OOB Data Request */
531 events[6] |= 0x20; /* Simple Pairing Complete */
532 events[7] |= 0x04; /* User Passkey Notification */
533 events[7] |= 0x08; /* Keypress Notification */
534 events[7] |= 0x10; /* Remote Host Supported
535 * Features Notification */
536 }
537
538 if (hdev->features[4] & LMP_LE)
539 events[7] |= 0x20; /* LE Meta-Event */
540
541 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
542}
543
/* Run the BR/EDR-specific part of controller initialization, issuing
 * the setup commands appropriate for the feature bits the controller
 * advertised: event mask, supported-commands read, SSP mode or EIR
 * reset, inquiry mode, inquiry TX power, extended features page 1 and
 * authentication enable. */
static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	hci_setup_event_mask(hdev);

	/* Read Local Supported Commands only exists from 1.2 onwards. */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
				     sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled by policy: clear any stale EIR
			 * data both locally and in the controller. */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
			     &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			     &enable);
	}
}
589
/* Command Complete for HCI_OP_READ_LOCAL_VERSION: cache the HCI/LMP
 * version information and, during initial device bring-up, continue
 * with the feature-dependent setup sequence. */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
	       hdev->manufacturer,
	       hdev->hci_ver, hdev->hci_rev);

	/* Only chain into hci_setup() while the init sequence runs. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
}
615
616static void hci_setup_link_policy(struct hci_dev *hdev)
617{
618 u16 link_policy = 0;
619
620 if (hdev->features[0] & LMP_RSWITCH)
621 link_policy |= HCI_LP_RSWITCH;
622 if (hdev->features[0] & LMP_HOLD)
623 link_policy |= HCI_LP_HOLD;
624 if (hdev->features[0] & LMP_SNIFF)
625 link_policy |= HCI_LP_SNIFF;
626 if (hdev->features[1] & LMP_PARK)
627 link_policy |= HCI_LP_PARK;
628
629 link_policy = cpu_to_le16(link_policy);
630 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(link_policy),
631 &link_policy);
632}
633
/* Command Complete for HCI_OP_READ_LOCAL_COMMANDS: cache the
 * supported-commands bitmap and, during init, program the default link
 * policy if the controller supports the command. */
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	/* commands[5] & 0x10 gates hci_setup_link_policy(), i.e. it is
	 * treated as the Write Default Link Policy Settings support bit
	 * -- NOTE(review): confirm against the Core spec
	 * supported-commands table. */
	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
651
/* Command Complete for HCI_OP_READ_LOCAL_FEATURES: cache the LMP
 * feature page and widen the default ACL packet types and (e)SCO link
 * types to everything the controller supports. */
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
706
/* Program the controller's LE Host Support bits to match host policy.
 * The command is only sent when the desired LE-enabled value differs
 * from what the controller currently reports, avoiding a redundant
 * round-trip. */
static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	/* enable_le is a module-level policy switch; HCI_LE_ENABLED is
	 * the per-device setting. */
	if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 1;
		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
		hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			     &cp);
}
722
/* Command Complete for HCI_OP_READ_LOCAL_EXT_FEATURES: cache the
 * returned feature page (0 = LMP features, 1 = host features) and,
 * during init, configure LE host support when the controller is
 * LE-capable. */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}

	if (test_bit(HCI_INIT, &hdev->flags) && hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
748
749static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
750 struct sk_buff *skb)
751{
752 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
753
754 BT_DBG("%s status 0x%x", hdev->name, rp->status);
755
756 if (rp->status)
757 return;
758
759 hdev->flow_ctl_mode = rp->mode;
760
761 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
762}
763
/* Command Complete for HCI_OP_READ_BUFFER_SIZE: record the ACL/SCO
 * MTUs and packet counts that drive the TX flow-control accounting. */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report broken SCO buffer values; override
	 * them with known-good defaults when the quirk is set. */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	/* Start with the full credit count available. */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
	       hdev->acl_mtu, hdev->acl_pkts,
	       hdev->sco_mtu, hdev->sco_pkts);
}
790
791static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
792{
793 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
794
795 BT_DBG("%s status 0x%x", hdev->name, rp->status);
796
797 if (!rp->status)
798 bacpy(&hdev->bdaddr, &rp->bdaddr);
799
800 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
801}
802
803static void hci_cc_read_data_block_size(struct hci_dev *hdev,
804 struct sk_buff *skb)
805{
806 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
807
808 BT_DBG("%s status 0x%x", hdev->name, rp->status);
809
810 if (rp->status)
811 return;
812
813 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
814 hdev->block_len = __le16_to_cpu(rp->block_len);
815 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
816
817 hdev->block_cnt = hdev->num_blocks;
818
819 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
820 hdev->block_cnt, hdev->block_len);
821
822 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
823}
824
825static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
826{
827 __u8 status = *((__u8 *) skb->data);
828
829 BT_DBG("%s status 0x%x", hdev->name, status);
830
831 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
832}
833
/* Command Complete for HCI_OP_READ_LOCAL_AMP_INFO: cache the AMP
 * controller's capability block (status, bandwidth limits, latency,
 * PDU size, PAL capabilities, association size and flush timeouts). */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
}
857
858static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
859 struct sk_buff *skb)
860{
861 __u8 status = *((__u8 *) skb->data);
862
863 BT_DBG("%s status 0x%x", hdev->name, status);
864
865 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
866}
867
868static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
869{
870 __u8 status = *((__u8 *) skb->data);
871
872 BT_DBG("%s status 0x%x", hdev->name, status);
873
874 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
875}
876
877static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
878 struct sk_buff *skb)
879{
880 __u8 status = *((__u8 *) skb->data);
881
882 BT_DBG("%s status 0x%x", hdev->name, status);
883
884 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
885}
886
887static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
888 struct sk_buff *skb)
889{
890 __u8 status = *((__u8 *) skb->data);
891
892 BT_DBG("%s status 0x%x", hdev->name, status);
893
894 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
895}
896
897static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
898{
899 __u8 status = *((__u8 *) skb->data);
900
901 BT_DBG("%s status 0x%x", hdev->name, status);
902
903 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
904}
905
/* Command Complete for HCI_OP_PIN_CODE_REPLY.
 *
 * Reports the result to mgmt and, on success, records the PIN length
 * (taken from the command we sent) on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
933
934static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
935{
936 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
937
938 BT_DBG("%s status 0x%x", hdev->name, rp->status);
939
940 hci_dev_lock(hdev);
941
942 if (test_bit(HCI_MGMT, &hdev->dev_flags))
943 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
944 rp->status);
945
946 hci_dev_unlock(hdev);
947}
948
/* Command Complete for HCI_OP_LE_READ_BUFFER_SIZE: record the LE ACL
 * MTU and packet count used for LE TX flow-control accounting. */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* Start with the full credit count available. */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
968
969static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
970{
971 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
972
973 BT_DBG("%s status 0x%x", hdev->name, rp->status);
974
975 hci_dev_lock(hdev);
976
977 if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
979 rp->status);
980
981 hci_dev_unlock(hdev);
982}
983
984static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
985 struct sk_buff *skb)
986{
987 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
988
989 BT_DBG("%s status 0x%x", hdev->name, rp->status);
990
991 hci_dev_lock(hdev);
992
993 if (test_bit(HCI_MGMT, &hdev->dev_flags))
994 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
995 ACL_LINK, 0, rp->status);
996
997 hci_dev_unlock(hdev);
998}
999
1000static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1001{
1002 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1003
1004 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1005
1006 hci_dev_lock(hdev);
1007
1008 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1009 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1010 0, rp->status);
1011
1012 hci_dev_unlock(hdev);
1013}
1014
1015static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1016 struct sk_buff *skb)
1017{
1018 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1019
1020 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1021
1022 hci_dev_lock(hdev);
1023
1024 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1025 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1026 ACL_LINK, 0, rp->status);
1027
1028 hci_dev_unlock(hdev);
1029}
1030
1031static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1032 struct sk_buff *skb)
1033{
1034 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1035
1036 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1037
1038 hci_dev_lock(hdev);
1039 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1040 rp->randomizer, rp->status);
1041 hci_dev_unlock(hdev);
1042}
1043
1044static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1045{
1046 __u8 status = *((__u8 *) skb->data);
1047
1048 BT_DBG("%s status 0x%x", hdev->name, status);
1049
1050 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1051
1052 if (status) {
1053 hci_dev_lock(hdev);
1054 mgmt_start_discovery_failed(hdev, status);
1055 hci_dev_unlock(hdev);
1056 return;
1057 }
1058}
1059
/* Command Complete for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Dispatches on the enable value of the command we sent:
 * - enabling: mark LE scan active, clear stale advertising cache
 *   entries and move discovery to DISCOVERY_FINDING (or report the
 *   failure to mgmt);
 * - disabling: clear the scan flag, schedule delayed clearing of the
 *   advertising cache and either continue an interleaved discovery or
 *   mark discovery stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* Stop the pending cache-expiry work before clearing the
		 * advertising cache; must run outside hci_dev_lock since
		 * the _sync variant may sleep waiting for the work. */
		cancel_delayed_work_sync(&hdev->adv_work);

		hci_dev_lock(hdev);
		hci_adv_entries_clear(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status)
			return;

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* Expire cached advertising entries after a grace period
		 * now that scanning has stopped. */
		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);

		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1116
1117static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1118{
1119 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1120
1121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1122
1123 if (rp->status)
1124 return;
1125
1126 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1127}
1128
1129static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1130{
1131 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1132
1133 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1134
1135 if (rp->status)
1136 return;
1137
1138 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1139}
1140
/* Command Complete for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the LE Host Support bit we wrote into the cached
 * host features. Outside of device initialization, also report the new
 * LE-enabled state to mgmt.
 */
static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;
	}

	/* During HCI_INIT this command is part of the setup sequence and
	 * not a user-triggered change, so mgmt is not notified then. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);

	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
1166
1167static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1168{
1169 BT_DBG("%s status 0x%x", hdev->name, status);
1170
1171 if (status) {
1172 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1173 hci_conn_check_pending(hdev);
1174 hci_dev_lock(hdev);
1175 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1176 mgmt_start_discovery_failed(hdev, status);
1177 hci_dev_unlock(hdev);
1178 return;
1179 }
1180
1181 set_bit(HCI_INQUIRY, &hdev->flags);
1182
1183 hci_dev_lock(hdev);
1184 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1185 hci_dev_unlock(hdev);
1186}
1187
/* Command Status handler for HCI_OP_CREATE_CONN (outgoing ACL).
 *
 * On failure, tears down the matching connection object — except for a
 * "Command Disallowed" (0x0c) failure on an early attempt, where the
 * connection is parked in BT_CONNECT2 for a later retry. On success,
 * makes sure a connection object exists for the address so the later
 * Connection Complete event has something to attach to.
 */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* Parameters of the CREATE_CONN command this status answers. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: keep the object for
			 * up to two retries before giving up. */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Status for a command we have no object for yet:
			 * create one as the outgoing master side. */
			conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1227
1228static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1229{
1230 struct hci_cp_add_sco *cp;
1231 struct hci_conn *acl, *sco;
1232 __u16 handle;
1233
1234 BT_DBG("%s status 0x%x", hdev->name, status);
1235
1236 if (!status)
1237 return;
1238
1239 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1240 if (!cp)
1241 return;
1242
1243 handle = __le16_to_cpu(cp->handle);
1244
1245 BT_DBG("%s handle %d", hdev->name, handle);
1246
1247 hci_dev_lock(hdev);
1248
1249 acl = hci_conn_hash_lookup_handle(hdev, handle);
1250 if (acl) {
1251 sco = acl->link;
1252 if (sco) {
1253 sco->state = BT_CLOSED;
1254
1255 hci_proto_connect_cfm(sco, status);
1256 hci_conn_del(sco);
1257 }
1258 }
1259
1260 hci_dev_unlock(hdev);
1261}
1262
1263static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1264{
1265 struct hci_cp_auth_requested *cp;
1266 struct hci_conn *conn;
1267
1268 BT_DBG("%s status 0x%x", hdev->name, status);
1269
1270 if (!status)
1271 return;
1272
1273 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1274 if (!cp)
1275 return;
1276
1277 hci_dev_lock(hdev);
1278
1279 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1280 if (conn) {
1281 if (conn->state == BT_CONFIG) {
1282 hci_proto_connect_cfm(conn, status);
1283 hci_conn_put(conn);
1284 }
1285 }
1286
1287 hci_dev_unlock(hdev);
1288}
1289
1290static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1291{
1292 struct hci_cp_set_conn_encrypt *cp;
1293 struct hci_conn *conn;
1294
1295 BT_DBG("%s status 0x%x", hdev->name, status);
1296
1297 if (!status)
1298 return;
1299
1300 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1301 if (!cp)
1302 return;
1303
1304 hci_dev_lock(hdev);
1305
1306 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1307 if (conn) {
1308 if (conn->state == BT_CONFIG) {
1309 hci_proto_connect_cfm(conn, status);
1310 hci_conn_put(conn);
1311 }
1312 }
1313
1314 hci_dev_unlock(hdev);
1315}
1316
1317static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1318 struct hci_conn *conn)
1319{
1320 if (conn->state != BT_CONFIG || !conn->out)
1321 return 0;
1322
1323 if (conn->pending_sec_level == BT_SECURITY_SDP)
1324 return 0;
1325
1326 /* Only request authentication for SSP connections or non-SSP
1327 * devices with sec_level HIGH or if MITM protection is requested */
1328 if (!hci_conn_ssp_enabled(conn) &&
1329 conn->pending_sec_level != BT_SECURITY_HIGH &&
1330 !(conn->auth_type & 0x01))
1331 return 0;
1332
1333 return 1;
1334}
1335
1336static inline int hci_resolve_name(struct hci_dev *hdev,
1337 struct inquiry_entry *e)
1338{
1339 struct hci_cp_remote_name_req cp;
1340
1341 memset(&cp, 0, sizeof(cp));
1342
1343 bacpy(&cp.bdaddr, &e->data.bdaddr);
1344 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1345 cp.pscan_mode = e->data.pscan_mode;
1346 cp.clock_offset = e->data.clock_offset;
1347
1348 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1349}
1350
1351static bool hci_resolve_next_name(struct hci_dev *hdev)
1352{
1353 struct discovery_state *discov = &hdev->discovery;
1354 struct inquiry_entry *e;
1355
1356 if (list_empty(&discov->resolve))
1357 return false;
1358
1359 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1360 if (!e)
1361 return false;
1362
1363 if (hci_resolve_name(hdev, e) == 0) {
1364 e->name_state = NAME_PENDING;
1365 return true;
1366 }
1367
1368 return false;
1369}
1370
/* Handle completion of a remote-name lookup during discovery.
 *
 * Reports the (possibly still unnamed) device to mgmt the first time it
 * is seen connected, then advances the name-resolution state machine:
 * the resolved entry is removed from the pending list, the name (when
 * obtained) is reported to mgmt, resolution of the next entry is kicked
 * off, and discovery is stopped when nothing is left.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
					bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First sight of this connection: tell mgmt, passing along
	 * whatever name information is available (possibly none). */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
						name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* Discovery is being cancelled: just mark it stopped. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More entries still waiting on a name: keep discovery running. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1413
1414static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1415{
1416 struct hci_cp_remote_name_req *cp;
1417 struct hci_conn *conn;
1418
1419 BT_DBG("%s status 0x%x", hdev->name, status);
1420
1421 /* If successful wait for the name req complete event before
1422 * checking for the need to do authentication */
1423 if (!status)
1424 return;
1425
1426 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1427 if (!cp)
1428 return;
1429
1430 hci_dev_lock(hdev);
1431
1432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1433
1434 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1435 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1436
1437 if (!conn)
1438 goto unlock;
1439
1440 if (!hci_outgoing_auth_needed(hdev, conn))
1441 goto unlock;
1442
1443 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1444 struct hci_cp_auth_requested cp;
1445 cp.handle = __cpu_to_le16(conn->handle);
1446 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1447 }
1448
1449unlock:
1450 hci_dev_unlock(hdev);
1451}
1452
1453static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1454{
1455 struct hci_cp_read_remote_features *cp;
1456 struct hci_conn *conn;
1457
1458 BT_DBG("%s status 0x%x", hdev->name, status);
1459
1460 if (!status)
1461 return;
1462
1463 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1464 if (!cp)
1465 return;
1466
1467 hci_dev_lock(hdev);
1468
1469 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1470 if (conn) {
1471 if (conn->state == BT_CONFIG) {
1472 hci_proto_connect_cfm(conn, status);
1473 hci_conn_put(conn);
1474 }
1475 }
1476
1477 hci_dev_unlock(hdev);
1478}
1479
1480static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1481{
1482 struct hci_cp_read_remote_ext_features *cp;
1483 struct hci_conn *conn;
1484
1485 BT_DBG("%s status 0x%x", hdev->name, status);
1486
1487 if (!status)
1488 return;
1489
1490 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1491 if (!cp)
1492 return;
1493
1494 hci_dev_lock(hdev);
1495
1496 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1497 if (conn) {
1498 if (conn->state == BT_CONFIG) {
1499 hci_proto_connect_cfm(conn, status);
1500 hci_conn_put(conn);
1501 }
1502 }
1503
1504 hci_dev_unlock(hdev);
1505}
1506
1507static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1508{
1509 struct hci_cp_setup_sync_conn *cp;
1510 struct hci_conn *acl, *sco;
1511 __u16 handle;
1512
1513 BT_DBG("%s status 0x%x", hdev->name, status);
1514
1515 if (!status)
1516 return;
1517
1518 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1519 if (!cp)
1520 return;
1521
1522 handle = __le16_to_cpu(cp->handle);
1523
1524 BT_DBG("%s handle %d", hdev->name, handle);
1525
1526 hci_dev_lock(hdev);
1527
1528 acl = hci_conn_hash_lookup_handle(hdev, handle);
1529 if (acl) {
1530 sco = acl->link;
1531 if (sco) {
1532 sco->state = BT_CLOSED;
1533
1534 hci_proto_connect_cfm(sco, status);
1535 hci_conn_del(sco);
1536 }
1537 }
1538
1539 hci_dev_unlock(hdev);
1540}
1541
1542static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1543{
1544 struct hci_cp_sniff_mode *cp;
1545 struct hci_conn *conn;
1546
1547 BT_DBG("%s status 0x%x", hdev->name, status);
1548
1549 if (!status)
1550 return;
1551
1552 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1553 if (!cp)
1554 return;
1555
1556 hci_dev_lock(hdev);
1557
1558 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1559 if (conn) {
1560 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1561
1562 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1563 hci_sco_setup(conn, status);
1564 }
1565
1566 hci_dev_unlock(hdev);
1567}
1568
1569static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1570{
1571 struct hci_cp_exit_sniff_mode *cp;
1572 struct hci_conn *conn;
1573
1574 BT_DBG("%s status 0x%x", hdev->name, status);
1575
1576 if (!status)
1577 return;
1578
1579 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1580 if (!cp)
1581 return;
1582
1583 hci_dev_lock(hdev);
1584
1585 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1586 if (conn) {
1587 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1588
1589 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1590 hci_sco_setup(conn, status);
1591 }
1592
1593 hci_dev_unlock(hdev);
1594}
1595
1596static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1597{
1598 struct hci_cp_disconnect *cp;
1599 struct hci_conn *conn;
1600
1601 if (!status)
1602 return;
1603
1604 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1605 if (!cp)
1606 return;
1607
1608 hci_dev_lock(hdev);
1609
1610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1611 if (conn)
1612 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1613 conn->dst_type, status);
1614
1615 hci_dev_unlock(hdev);
1616}
1617
1618static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1619{
1620 struct hci_cp_le_create_conn *cp;
1621 struct hci_conn *conn;
1622
1623 BT_DBG("%s status 0x%x", hdev->name, status);
1624
1625 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1626 if (!cp)
1627 return;
1628
1629 hci_dev_lock(hdev);
1630
1631 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1632
1633 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1634 conn);
1635
1636 if (status) {
1637 if (conn && conn->state == BT_CONNECT) {
1638 conn->state = BT_CLOSED;
1639 hci_proto_connect_cfm(conn, status);
1640 hci_conn_del(conn);
1641 }
1642 } else {
1643 if (!conn) {
1644 conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
1645 if (conn) {
1646 conn->dst_type = cp->peer_addr_type;
1647 conn->out = true;
1648 } else {
1649 BT_ERR("No memory for new connection");
1650 }
1651 }
1652 }
1653
1654 hci_dev_unlock(hdev);
1655}
1656
/* Command Status handler for HCI_OP_LE_START_ENC: currently only logs
 * the status; no further action is taken here.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}
1661
/* Inquiry Complete event: the BR/EDR inquiry phase has ended.
 *
 * Completes the pending HCI_OP_INQUIRY request and, when mgmt-driven
 * discovery is active, either starts resolving names of the discovered
 * devices or marks discovery as stopped.
 */
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status %d", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	/* Retry connection attempts deferred during the inquiry. */
	hci_conn_check_pending(hdev);

	/* Nothing more to do if no inquiry was actually running. */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* No devices left needing name resolution: discovery is done. */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first device needing it. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1701
1702static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1703{
1704 struct inquiry_data data;
1705 struct inquiry_info *info = (void *) (skb->data + 1);
1706 int num_rsp = *((__u8 *) skb->data);
1707
1708 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1709
1710 if (!num_rsp)
1711 return;
1712
1713 hci_dev_lock(hdev);
1714
1715 for (; num_rsp; num_rsp--, info++) {
1716 bool name_known, ssp;
1717
1718 bacpy(&data.bdaddr, &info->bdaddr);
1719 data.pscan_rep_mode = info->pscan_rep_mode;
1720 data.pscan_period_mode = info->pscan_period_mode;
1721 data.pscan_mode = info->pscan_mode;
1722 memcpy(data.dev_class, info->dev_class, 3);
1723 data.clock_offset = info->clock_offset;
1724 data.rssi = 0x00;
1725 data.ssp_mode = 0x00;
1726
1727 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1728 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1729 info->dev_class, 0, !name_known, ssp, NULL,
1730 0);
1731 }
1732
1733 hci_dev_unlock(hdev);
1734}
1735
/* Connection Complete event: an ACL or (e)SCO connection attempt has
 * finished, successfully or not.
 *
 * On success the connection moves to BT_CONFIG (ACL, pending the remote
 * feature exchange) or BT_CONNECTED (SCO), is registered in sysfs and
 * inherits the adapter's auth/encrypt link-mode bits. On failure the
 * object is torn down and mgmt is notified for ACL links.
 */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* The request may have gone out as eSCO but completed as
		 * plain SCO: retarget the pending eSCO object. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give unpaired incoming legacy peers a longer
			 * idle window so pairing can complete. */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
					!hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
									&cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
	}

	/* Continue any SCO setup riding on this ACL link. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links confirm later, after the config phase. */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	/* Process connection attempts deferred while this one ran. */
	hci_conn_check_pending(hdev);
}
1818
1819static inline bool is_sco_active(struct hci_dev *hdev)
1820{
1821 if (hci_conn_hash_lookup_state(hdev, SCO_LINK, BT_CONNECTED) ||
1822 (hci_conn_hash_lookup_state(hdev, ESCO_LINK,
1823 BT_CONNECTED)))
1824 return true;
1825 return false;
1826}
1827
/* Connection Request event: a remote device wants to connect to us.
 *
 * The request is accepted when the link policy (possibly amended by the
 * protocol layers) allows it and the peer is not blacklisted; otherwise
 * it is rejected. (e)SCO requests are accepted with default audio
 * parameters.
 */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	/* Let the protocol layers widen the accept policy. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we know the peer. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			/* pkt_type not yet used for incoming connections */
			conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			/* Take the master role when the policy asks for it
			 * or audio is active, if role switching works. */
			if (lmp_rswitch_capable(hdev) && ((mask & HCI_LM_MASTER)
						|| is_sco_active(hdev)))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
									&cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* Default sync-connection parameters; retransmission
			 * effort 0xff leaves the choice to the controller. */
			cp.tx_bandwidth = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = cpu_to_le32(0x00001f40);
			cp.max_latency = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
						sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1903
1904static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1905{
1906 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1907 struct hci_conn *conn;
1908
1909 BT_DBG("%s status %d", hdev->name, ev->status);
1910
1911 hci_dev_lock(hdev);
1912
1913 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1914 if (!conn)
1915 goto unlock;
1916
1917 if (ev->status == 0)
1918 conn->state = BT_CLOSED;
1919
1920 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1921 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1922 if (ev->status != 0)
1923 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1924 conn->dst_type, ev->status);
1925 else
1926 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1927 conn->dst_type);
1928 }
1929
1930 if (ev->status == 0) {
1931 if (conn->type == ACL_LINK && conn->flush_key)
1932 hci_remove_link_key(hdev, &conn->dst);
1933 hci_proto_disconn_cfm(conn, ev->reason);
1934 hci_conn_del(conn);
1935 }
1936
1937unlock:
1938 hci_dev_unlock(hdev);
1939}
1940
/* Authentication Complete event.
 *
 * On success the link's security state is upgraded (with a carve-out
 * for legacy devices that cannot be re-authenticated); on failure mgmt
 * is told. Connection setup then continues: SSP links in BT_CONFIG go
 * on to enable encryption, and any deferred encryption request is
 * issued now that authentication has finished.
 */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (pre-SSP) devices cannot be re-authenticated;
		 * keep the existing security level in that case. */
		if (!hci_conn_ssp_enabled(conn) &&
				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
								ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links enable encryption before completing. */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			/* Authentication done: start the deferred
			 * encryption request. */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2006
/* Remote Name Request Complete event.
 *
 * Feeds the resolved (or failed) name into the discovery machinery when
 * mgmt is active, then checks whether the connection now needs an
 * outgoing authentication request.
 */
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* A failed lookup is passed through with a NULL name. */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication unless one is already in flight. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2045
/* Encryption Change event: link encryption was switched on or off.
 *
 * Updates the link-mode bits, disconnects an established link when the
 * encryption change failed, and completes connection setup for links
 * still in BT_CONFIG.
 */
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* Encryption failed on a live link: drop it.
		 * (0x13 = Remote User Terminated Connection reason.) */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_acl_disconn(conn, 0x13);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2088
2089static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2090{
2091 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2092 struct hci_conn *conn;
2093
2094 BT_DBG("%s status %d", hdev->name, ev->status);
2095
2096 hci_dev_lock(hdev);
2097
2098 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2099 if (conn) {
2100 if (!ev->status)
2101 conn->link_mode |= HCI_LM_SECURE;
2102
2103 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2104
2105 hci_key_change_cfm(conn, ev->status);
2106 }
2107
2108 hci_dev_unlock(hdev);
2109}
2110
/* Read Remote Supported Features Complete event.
 *
 * Stores the remote feature mask and continues connection setup: when
 * both sides are SSP capable the extended feature page is read next;
 * otherwise the remote name is requested (or the device reported to
 * mgmt) and setup completes if no authentication is needed.
 */
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP capable: fetch the extended feature page. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the name before telling mgmt about the connection. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
						conn->dst_type, 0, NULL, 0,
						conn->dev_class);

	/* No authentication required: the connection is usable now. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2159
/* Read Remote Version Information Complete event: currently only
 * logged; the payload is not processed here.
 */
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2164
/* QoS Setup Complete event: currently only logged; the payload is not
 * processed here.
 */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2169
/* Command Complete event: dispatch to the per-opcode hci_cc_* handler.
 *
 * After dispatch the command watchdog timer is reset (unless this was a
 * NOP) and, when the controller signals it can accept more commands,
 * the command queue worker is kicked again.
 */
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header; handlers see only the return params. */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	/* Any real command completion feeds the command watchdog. */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Controller can take more commands: kick the queue again. */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2386
2387static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2388{
2389 struct hci_ev_cmd_status *ev = (void *) skb->data;
2390 __u16 opcode;
2391
2392 skb_pull(skb, sizeof(*ev));
2393
2394 opcode = __le16_to_cpu(ev->opcode);
2395
2396 switch (opcode) {
2397 case HCI_OP_INQUIRY:
2398 hci_cs_inquiry(hdev, ev->status);
2399 break;
2400
2401 case HCI_OP_CREATE_CONN:
2402 hci_cs_create_conn(hdev, ev->status);
2403 break;
2404
2405 case HCI_OP_ADD_SCO:
2406 hci_cs_add_sco(hdev, ev->status);
2407 break;
2408
2409 case HCI_OP_AUTH_REQUESTED:
2410 hci_cs_auth_requested(hdev, ev->status);
2411 break;
2412
2413 case HCI_OP_SET_CONN_ENCRYPT:
2414 hci_cs_set_conn_encrypt(hdev, ev->status);
2415 break;
2416
2417 case HCI_OP_REMOTE_NAME_REQ:
2418 hci_cs_remote_name_req(hdev, ev->status);
2419 break;
2420
2421 case HCI_OP_READ_REMOTE_FEATURES:
2422 hci_cs_read_remote_features(hdev, ev->status);
2423 break;
2424
2425 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2426 hci_cs_read_remote_ext_features(hdev, ev->status);
2427 break;
2428
2429 case HCI_OP_SETUP_SYNC_CONN:
2430 hci_cs_setup_sync_conn(hdev, ev->status);
2431 break;
2432
2433 case HCI_OP_SNIFF_MODE:
2434 hci_cs_sniff_mode(hdev, ev->status);
2435 break;
2436
2437 case HCI_OP_EXIT_SNIFF_MODE:
2438 hci_cs_exit_sniff_mode(hdev, ev->status);
2439 break;
2440
2441 case HCI_OP_DISCONNECT:
2442 hci_cs_disconnect(hdev, ev->status);
2443 break;
2444
2445 case HCI_OP_LE_CREATE_CONN:
2446 hci_cs_le_create_conn(hdev, ev->status);
2447 break;
2448
2449 case HCI_OP_LE_START_ENC:
2450 hci_cs_le_start_enc(hdev, ev->status);
2451 break;
2452
2453 default:
2454 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2455 break;
2456 }
2457
2458 if (ev->opcode != HCI_OP_NOP)
2459 del_timer(&hdev->cmd_timer);
2460
2461 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2462 atomic_set(&hdev->cmd_cnt, 1);
2463 if (!skb_queue_empty(&hdev->cmd_q))
2464 queue_work(hdev->workqueue, &hdev->cmd_work);
2465 }
2466}
2467
2468static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2469{
2470 struct hci_ev_role_change *ev = (void *) skb->data;
2471 struct hci_conn *conn;
2472
2473 BT_DBG("%s status %d", hdev->name, ev->status);
2474
2475 hci_dev_lock(hdev);
2476
2477 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2478 if (conn) {
2479 if (!ev->status) {
2480 if (ev->role)
2481 conn->link_mode &= ~HCI_LM_MASTER;
2482 else
2483 conn->link_mode |= HCI_LM_MASTER;
2484 }
2485
2486 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2487
2488 hci_role_switch_cfm(conn, ev->status, ev->role);
2489 }
2490
2491 hci_dev_unlock(hdev);
2492}
2493
/* Handle the HCI Number of Completed Packets event.
 *
 * The controller reports, per connection handle, how many queued
 * ACL/SCO/LE packets it has finished transmitting.  Return the
 * corresponding flow-control credits to the per-device counters and
 * kick the TX work so queued traffic can be sent.
 */
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow control mode;
	 * block-based controllers send Number of Completed Blocks instead. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb really carries num_hndl handle/count pairs
	 * before walking the flexible array. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the pool matching the link type, clamped to the
		 * controller-advertised maximum. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL pool. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2559
/* Handle the HCI Number of Completed Data Blocks event.
 *
 * Block-based flow control counterpart of Number of Completed Packets:
 * return completed transmit blocks to the shared device pool for each
 * reported connection handle, then kick the TX work.
 */
static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Only valid when the controller uses block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Ensure the skb actually contains num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
								ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		/* Credit the device-wide block pool, clamped to the
		 * controller-advertised maximum. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2609
2610static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2611{
2612 struct hci_ev_mode_change *ev = (void *) skb->data;
2613 struct hci_conn *conn;
2614
2615 BT_DBG("%s status %d", hdev->name, ev->status);
2616
2617 hci_dev_lock(hdev);
2618
2619 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2620 if (conn) {
2621 conn->mode = ev->mode;
2622 conn->interval = __le16_to_cpu(ev->interval);
2623
2624 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2625 if (conn->mode == HCI_CM_ACTIVE)
2626 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2627 else
2628 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2629 }
2630
2631 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2632 hci_sco_setup(conn, ev->status);
2633 }
2634
2635 hci_dev_unlock(hdev);
2636}
2637
/* Handle the HCI PIN Code Request event.
 *
 * If the device is not pairable, reject immediately with a negative
 * reply; otherwise, when the management interface is active, forward
 * the request to user space so it can supply a PIN.
 */
static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress;
	 * the hold/put pair refreshes the disc timer without keeping an
	 * extra reference. */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit (secure) PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2674
2675static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2676{
2677 struct hci_ev_link_key_req *ev = (void *) skb->data;
2678 struct hci_cp_link_key_reply cp;
2679 struct hci_conn *conn;
2680 struct link_key *key;
2681
2682 BT_DBG("%s", hdev->name);
2683
2684 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2685 return;
2686
2687 hci_dev_lock(hdev);
2688
2689 key = hci_find_link_key(hdev, &ev->bdaddr);
2690 if (!key) {
2691 BT_DBG("%s link key not found for %s", hdev->name,
2692 batostr(&ev->bdaddr));
2693 goto not_found;
2694 }
2695
2696 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2697 batostr(&ev->bdaddr));
2698
2699 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2700 key->type == HCI_LK_DEBUG_COMBINATION) {
2701 BT_DBG("%s ignoring debug key", hdev->name);
2702 goto not_found;
2703 }
2704
2705 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2706 if (conn) {
2707 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2708 conn->auth_type != 0xff &&
2709 (conn->auth_type & 0x01)) {
2710 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2711 goto not_found;
2712 }
2713
2714 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2715 conn->pending_sec_level == BT_SECURITY_HIGH) {
2716 BT_DBG("%s ignoring key unauthenticated for high \
2717 security", hdev->name);
2718 goto not_found;
2719 }
2720
2721 conn->key_type = key->type;
2722 conn->pin_length = key->pin_len;
2723 }
2724
2725 bacpy(&cp.bdaddr, &ev->bdaddr);
2726 memcpy(cp.link_key, key->val, 16);
2727
2728 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2729
2730 hci_dev_unlock(hdev);
2731
2732 return;
2733
2734not_found:
2735 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2736 hci_dev_unlock(hdev);
2737}
2738
/* Handle the HCI Link Key Notification event: a new link key was
 * generated during pairing.  Store it (if key management is enabled)
 * and cache its type and PIN length on the connection.
 */
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* NOTE(review): conn may be NULL here; hci_add_link_key is
	 * apparently expected to cope with that — confirm. */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
							ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2767
2768static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2769{
2770 struct hci_ev_clock_offset *ev = (void *) skb->data;
2771 struct hci_conn *conn;
2772
2773 BT_DBG("%s status %d", hdev->name, ev->status);
2774
2775 hci_dev_lock(hdev);
2776
2777 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2778 if (conn && !ev->status) {
2779 struct inquiry_entry *ie;
2780
2781 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2782 if (ie) {
2783 ie->data.clock_offset = ev->clock_offset;
2784 ie->timestamp = jiffies;
2785 }
2786 }
2787
2788 hci_dev_unlock(hdev);
2789}
2790
2791static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2792{
2793 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2794 struct hci_conn *conn;
2795
2796 BT_DBG("%s status %d", hdev->name, ev->status);
2797
2798 hci_dev_lock(hdev);
2799
2800 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2801 if (conn && !ev->status)
2802 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2803
2804 hci_dev_unlock(hdev);
2805}
2806
2807static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2808{
2809 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2810 struct inquiry_entry *ie;
2811
2812 BT_DBG("%s", hdev->name);
2813
2814 hci_dev_lock(hdev);
2815
2816 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2817 if (ie) {
2818 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2819 ie->timestamp = jiffies;
2820 }
2821
2822 hci_dev_unlock(hdev);
2823}
2824
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * Two wire formats exist: some controllers append a pscan_mode byte to
 * each entry.  Distinguish them by dividing the payload length by the
 * response count, then feed every entry into the inquiry cache and
 * report it to the management interface.
 */
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	/* Per-entry size mismatch implies the variant with pscan_mode */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
								false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode	= info->pscan_rep_mode;
			data.pscan_period_mode	= info->pscan_period_mode;
			data.pscan_mode		= 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset	= info->clock_offset;
			data.rssi		= info->rssi;
			data.ssp_mode		= 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
								false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2880
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Page 0x01 carries the host features; record remote SSP support in
 * both the inquiry cache and the connection flags.  While the link is
 * still in BT_CONFIG, continue connection setup: request the remote
 * name (or report the device as connected to mgmt) and, if no outgoing
 * authentication is needed, declare the connection established.
 */
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
						conn->dst_type, 0, NULL, 0,
						conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2928
/* Handle the HCI Synchronous Connection Complete event.
 *
 * Finds the pending SCO/eSCO connection (falling back from a SCO
 * lookup to the eSCO entry when the controller downgraded the link),
 * marks it connected on success, and on a known set of negotiation
 * failures retries once with non-EDR packet types before giving up.
 */
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller fell back from eSCO to SCO: the pending
		 * connection is still registered under ESCO_LINK. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once without EDR packet types; some remotes
		 * reject the initial EDR parameter set. */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2984
/* Handle the HCI Synchronous Connection Changed event.
 * Intentionally a no-op beyond debug logging: nothing here depends on
 * the renegotiated synchronous link parameters.
 */
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2989
/* Handle the HCI Sniff Subrating event.
 * Only the status is logged; the subrating parameters are not used.
 */
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
2996
/* Handle the HCI Extended Inquiry Result event.
 *
 * Each response carries EIR data in addition to the classic inquiry
 * fields.  Update the inquiry cache (marking the name as known when the
 * EIR already contains a complete name) and report every device to the
 * management interface together with its EIR payload.
 */
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* No remote name request is needed when the EIR data
		 * already includes the complete name. */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
							sizeof(info->data),
							EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
									&ssp);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					info->dev_class, info->rssi, !name_known,
					ssp, info->data, sizeof(info->data));
	}

	hci_dev_unlock(hdev);
}
3038
3039static inline u8 hci_get_auth_req(struct hci_conn *conn)
3040{
3041 /* If remote requests dedicated bonding follow that lead */
3042 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3043 /* If both remote and local IO capabilities allow MITM
3044 * protection then require it, otherwise don't */
3045 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3046 return 0x02;
3047 else
3048 return 0x03;
3049 }
3050
3051 /* If remote requests no-bonding follow that lead */
3052 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3053 return conn->remote_auth | (conn->auth_type & 0x01);
3054
3055 return conn->auth_type;
3056}
3057
/* Handle the HCI IO Capability Request event.
 *
 * When pairing is acceptable (device pairable, or remote only wants
 * no-bonding) answer with our IO capability and derived authentication
 * requirements; otherwise reject with "pairing not allowed".  Holds a
 * reference on the connection for the duration of the SSP exchange.
 */
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we initiated or the remote
		 * indicated OOB, and we actually have data stored. */
		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3109
3110static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3111{
3112 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3113 struct hci_conn *conn;
3114
3115 BT_DBG("%s", hdev->name);
3116
3117 hci_dev_lock(hdev);
3118
3119 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3120 if (!conn)
3121 goto unlock;
3122
3123 conn->remote_cap = ev->capability;
3124 conn->remote_auth = ev->authentication;
3125 if (ev->oob_data)
3126 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3127
3128unlock:
3129 hci_dev_unlock(hdev);
3130}
3131
/* Handle the HCI User Confirmation Request event.
 *
 * Decide whether to auto-accept the numeric comparison, reject it
 * (remote cannot provide required MITM protection), or hand the
 * decision to user space via mgmt, optionally after a configurable
 * auto-accept delay.
 */
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
				(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
				(loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		/* Defer the accept so user space has a chance to see it */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
						sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
								confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3202
3203static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3204 struct sk_buff *skb)
3205{
3206 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3207
3208 BT_DBG("%s", hdev->name);
3209
3210 hci_dev_lock(hdev);
3211
3212 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3213 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3214
3215 hci_dev_unlock(hdev);
3216}
3217
/* Handle the HCI Simple Pairing Complete event: report pairing
 * failures to the management interface (avoiding duplicates with the
 * auth-complete path) and drop the reference taken at IO capability
 * request time.
 */
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
								ev->status);

	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3245
3246static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3247{
3248 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3249 struct inquiry_entry *ie;
3250
3251 BT_DBG("%s", hdev->name);
3252
3253 hci_dev_lock(hdev);
3254
3255 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3256 if (ie)
3257 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3258
3259 hci_dev_unlock(hdev);
3260}
3261
/* Handle the HCI Remote OOB Data Request event: reply with the stored
 * hash/randomizer pair for the remote address if we have one, or send
 * a negative reply otherwise.
 */
static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only maintained when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
									&cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
									&cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3296
/* Handle the HCI LE Connection Complete event.
 *
 * Find (or create, for incoming connections) the LE hci_conn for the
 * peer address; on failure report to mgmt and tear the connection
 * down, on success mark it connected and notify mgmt and the
 * protocol layers.
 */
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		/* No pending outgoing connection: remote-initiated, so
		 * allocate a fresh hci_conn for it. */
		conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
					conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3343
/* Handle the HCI LE Advertising Report event.
 *
 * Walk the variable-length list of reports: each entry is a
 * hci_ev_le_advertising_info header, `length` bytes of advertising
 * data, then a trailing RSSI byte.  Cache each advertiser and report
 * it to the management interface.
 */
static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];
	s8 rssi;

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		hci_add_adv_entry(hdev, ev);

		/* The RSSI byte follows directly after the adv data */
		rssi = ev->data[ev->length];
		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
					NULL, rssi, 0, 1, ev->data, ev->length);

		/* Advance past header, adv data and the RSSI byte */
		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
3367
3368static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3369 struct sk_buff *skb)
3370{
3371 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3372 struct hci_cp_le_ltk_reply cp;
3373 struct hci_cp_le_ltk_neg_reply neg;
3374 struct hci_conn *conn;
3375 struct smp_ltk *ltk;
3376
3377 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3378
3379 hci_dev_lock(hdev);
3380
3381 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3382 if (conn == NULL)
3383 goto not_found;
3384
3385 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3386 if (ltk == NULL)
3387 goto not_found;
3388
3389 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3390 cp.handle = cpu_to_le16(conn->handle);
3391
3392 if (ltk->authenticated)
3393 conn->pending_sec_level = BT_SECURITY_HIGH;
3394 else
3395 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3396
3397 conn->enc_key_size = ltk->enc_size;
3398
3399 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3400
3401 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
3402 * temporary key used to encrypt a connection following
3403 * pairing. It is used during the Encrypted Session Setup to
3404 * distribute the keys. Later, security can be re-established
3405 * using a distributed LTK.
3406 */
3407 if (ltk->type == HCI_SMP_STK_SLAVE) {
3408 list_del(&ltk->list);
3409 kfree(ltk);
3410 }
3411
3412 hci_dev_unlock(hdev);
3413
3414 return;
3415
3416not_found:
3417 neg.handle = ev->handle;
3418 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3419 hci_dev_unlock(hdev);
3420}
3421
3422static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3423{
3424 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3425
3426 skb_pull(skb, sizeof(*le_ev));
3427
3428 switch (le_ev->subevent) {
3429 case HCI_EV_LE_CONN_COMPLETE:
3430 hci_le_conn_complete_evt(hdev, skb);
3431 break;
3432
3433 case HCI_EV_LE_ADVERTISING_REPORT:
3434 hci_le_adv_report_evt(hdev, skb);
3435 break;
3436
3437 case HCI_EV_LE_LTK_REQ:
3438 hci_le_ltk_request_evt(hdev, skb);
3439 break;
3440
3441 default:
3442 break;
3443 }
3444}
3445
/* Main HCI event demultiplexer.
 *
 * Entry point for every HCI event packet received from the controller:
 * strip the event header and dispatch to the matching handler above.
 * Unknown events are only logged.  Consumes the skb and updates the
 * device's event RX statistics.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}