blob: 2706e238ca4496cafdf78a693c59910f564b2d9d [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI Management interface */
26
27#include <linux/module.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/hci_sock.h>
33#include <net/bluetooth/l2cap.h>
34#include <net/bluetooth/mgmt.h>
35
36#include "hci_request.h"
37#include "smp.h"
38#include "mgmt_util.h"
39
40#define MGMT_VERSION 1
41#define MGMT_REVISION 14
42
43static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
45 MGMT_OP_READ_INFO,
46 MGMT_OP_SET_POWERED,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
50 MGMT_OP_SET_BONDABLE,
51 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_SSP,
53 MGMT_OP_SET_HS,
54 MGMT_OP_SET_LE,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
57 MGMT_OP_ADD_UUID,
58 MGMT_OP_REMOVE_UUID,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
61 MGMT_OP_DISCONNECT,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
66 MGMT_OP_PAIR_DEVICE,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
78 MGMT_OP_CONFIRM_NAME,
79 MGMT_OP_BLOCK_DEVICE,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
83 MGMT_OP_SET_BREDR,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
88 MGMT_OP_SET_PRIVACY,
89 MGMT_OP_LOAD_IRKS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
92 MGMT_OP_ADD_DEVICE,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
105 MGMT_OP_GET_ADV_SIZE_INFO,
106 MGMT_OP_START_LIMITED_DISCOVERY,
107 MGMT_OP_READ_EXT_INFO,
108 MGMT_OP_SET_APPEARANCE,
109};
110
111static const u16 mgmt_events[] = {
112 MGMT_EV_CONTROLLER_ERROR,
113 MGMT_EV_INDEX_ADDED,
114 MGMT_EV_INDEX_REMOVED,
115 MGMT_EV_NEW_SETTINGS,
116 MGMT_EV_CLASS_OF_DEV_CHANGED,
117 MGMT_EV_LOCAL_NAME_CHANGED,
118 MGMT_EV_NEW_LINK_KEY,
119 MGMT_EV_NEW_LONG_TERM_KEY,
120 MGMT_EV_DEVICE_CONNECTED,
121 MGMT_EV_DEVICE_DISCONNECTED,
122 MGMT_EV_CONNECT_FAILED,
123 MGMT_EV_PIN_CODE_REQUEST,
124 MGMT_EV_USER_CONFIRM_REQUEST,
125 MGMT_EV_USER_PASSKEY_REQUEST,
126 MGMT_EV_AUTH_FAILED,
127 MGMT_EV_DEVICE_FOUND,
128 MGMT_EV_DISCOVERING,
129 MGMT_EV_DEVICE_BLOCKED,
130 MGMT_EV_DEVICE_UNBLOCKED,
131 MGMT_EV_DEVICE_UNPAIRED,
132 MGMT_EV_PASSKEY_NOTIFY,
133 MGMT_EV_NEW_IRK,
134 MGMT_EV_NEW_CSRK,
135 MGMT_EV_DEVICE_ADDED,
136 MGMT_EV_DEVICE_REMOVED,
137 MGMT_EV_NEW_CONN_PARAM,
138 MGMT_EV_UNCONF_INDEX_ADDED,
139 MGMT_EV_UNCONF_INDEX_REMOVED,
140 MGMT_EV_NEW_CONFIG_OPTIONS,
141 MGMT_EV_EXT_INDEX_ADDED,
142 MGMT_EV_EXT_INDEX_REMOVED,
143 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
144 MGMT_EV_ADVERTISING_ADDED,
145 MGMT_EV_ADVERTISING_REMOVED,
146 MGMT_EV_EXT_INFO_CHANGED,
147};
148
149static const u16 mgmt_untrusted_commands[] = {
150 MGMT_OP_READ_INDEX_LIST,
151 MGMT_OP_READ_INFO,
152 MGMT_OP_READ_UNCONF_INDEX_LIST,
153 MGMT_OP_READ_CONFIG_INFO,
154 MGMT_OP_READ_EXT_INDEX_LIST,
155 MGMT_OP_READ_EXT_INFO,
156};
157
158static const u16 mgmt_untrusted_events[] = {
159 MGMT_EV_INDEX_ADDED,
160 MGMT_EV_INDEX_REMOVED,
161 MGMT_EV_NEW_SETTINGS,
162 MGMT_EV_CLASS_OF_DEV_CHANGED,
163 MGMT_EV_LOCAL_NAME_CHANGED,
164 MGMT_EV_UNCONF_INDEX_ADDED,
165 MGMT_EV_UNCONF_INDEX_REMOVED,
166 MGMT_EV_NEW_CONFIG_OPTIONS,
167 MGMT_EV_EXT_INDEX_ADDED,
168 MGMT_EV_EXT_INDEX_REMOVED,
169 MGMT_EV_EXT_INFO_CHANGED,
170};
171
172#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
173
174#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
175 "\x00\x00\x00\x00\x00\x00\x00\x00"
176
177/* HCI to MGMT error code conversion table */
178static u8 mgmt_status_table[] = {
179 MGMT_STATUS_SUCCESS,
180 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
181 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
182 MGMT_STATUS_FAILED, /* Hardware Failure */
183 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
184 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
185 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
186 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
187 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
188 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
189 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
190 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
191 MGMT_STATUS_BUSY, /* Command Disallowed */
192 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
193 MGMT_STATUS_REJECTED, /* Rejected Security */
194 MGMT_STATUS_REJECTED, /* Rejected Personal */
195 MGMT_STATUS_TIMEOUT, /* Host Timeout */
196 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
197 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
198 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
199 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
200 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
201 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
202 MGMT_STATUS_BUSY, /* Repeated Attempts */
203 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
204 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
205 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
206 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
207 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
208 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
209 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
210 MGMT_STATUS_FAILED, /* Unspecified Error */
211 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
212 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
213 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
214 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
215 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
216 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
217 MGMT_STATUS_FAILED, /* Unit Link Key Used */
218 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
219 MGMT_STATUS_TIMEOUT, /* Instant Passed */
220 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
221 MGMT_STATUS_FAILED, /* Transaction Collision */
222 MGMT_STATUS_FAILED, /* Reserved for future use */
223 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
224 MGMT_STATUS_REJECTED, /* QoS Rejected */
225 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
226 MGMT_STATUS_REJECTED, /* Insufficient Security */
227 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
228 MGMT_STATUS_FAILED, /* Reserved for future use */
229 MGMT_STATUS_BUSY, /* Role Switch Pending */
230 MGMT_STATUS_FAILED, /* Reserved for future use */
231 MGMT_STATUS_FAILED, /* Slot Violation */
232 MGMT_STATUS_FAILED, /* Role Switch Failed */
233 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
234 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
235 MGMT_STATUS_BUSY, /* Host Busy Pairing */
236 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
237 MGMT_STATUS_BUSY, /* Controller Busy */
238 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
239 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
240 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
241 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
242 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
243};
244
245static u8 mgmt_status(u8 hci_status)
246{
247 if (hci_status < ARRAY_SIZE(mgmt_status_table))
248 return mgmt_status_table[hci_status];
249
250 return MGMT_STATUS_FAILED;
251}
252
253static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
254 u16 len, int flag)
255{
256 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
257 flag, NULL);
258}
259
260static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
261 u16 len, int flag, struct sock *skip_sk)
262{
263 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
264 flag, skip_sk);
265}
266
267static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
268 struct sock *skip_sk)
269{
270 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
271 HCI_SOCK_TRUSTED, skip_sk);
272}
273
274static u8 le_addr_type(u8 mgmt_addr_type)
275{
276 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
277 return ADDR_LE_DEV_PUBLIC;
278 else
279 return ADDR_LE_DEV_RANDOM;
280}
281
282void mgmt_fill_version_info(void *ver)
283{
284 struct mgmt_rp_read_version *rp = ver;
285
286 rp->version = MGMT_VERSION;
287 rp->revision = cpu_to_le16(MGMT_REVISION);
288}
289
290static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
291 u16 data_len)
292{
293 struct mgmt_rp_read_version rp;
294
295 BT_DBG("sock %p", sk);
296
297 mgmt_fill_version_info(&rp);
298
299 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
300 &rp, sizeof(rp));
301}
302
303static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
304 u16 data_len)
305{
306 struct mgmt_rp_read_commands *rp;
307 u16 num_commands, num_events;
308 size_t rp_size;
309 int i, err;
310
311 BT_DBG("sock %p", sk);
312
313 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
314 num_commands = ARRAY_SIZE(mgmt_commands);
315 num_events = ARRAY_SIZE(mgmt_events);
316 } else {
317 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
318 num_events = ARRAY_SIZE(mgmt_untrusted_events);
319 }
320
321 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
322
323 rp = kmalloc(rp_size, GFP_KERNEL);
324 if (!rp)
325 return -ENOMEM;
326
327 rp->num_commands = cpu_to_le16(num_commands);
328 rp->num_events = cpu_to_le16(num_events);
329
330 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
331 __le16 *opcode = rp->opcodes;
332
333 for (i = 0; i < num_commands; i++, opcode++)
334 put_unaligned_le16(mgmt_commands[i], opcode);
335
336 for (i = 0; i < num_events; i++, opcode++)
337 put_unaligned_le16(mgmt_events[i], opcode);
338 } else {
339 __le16 *opcode = rp->opcodes;
340
341 for (i = 0; i < num_commands; i++, opcode++)
342 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
343
344 for (i = 0; i < num_events; i++, opcode++)
345 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
346 }
347
348 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
349 rp, rp_size);
350 kfree(rp);
351
352 return err;
353}
354
355static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
356 u16 data_len)
357{
358 struct mgmt_rp_read_index_list *rp;
359 struct hci_dev *d;
360 size_t rp_len;
361 u16 count;
362 int err;
363
364 BT_DBG("sock %p", sk);
365
366 read_lock(&hci_dev_list_lock);
367
368 count = 0;
369 list_for_each_entry(d, &hci_dev_list, list) {
370 if (d->dev_type == HCI_PRIMARY &&
371 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
372 count++;
373 }
374
375 rp_len = sizeof(*rp) + (2 * count);
376 rp = kmalloc(rp_len, GFP_ATOMIC);
377 if (!rp) {
378 read_unlock(&hci_dev_list_lock);
379 return -ENOMEM;
380 }
381
382 count = 0;
383 list_for_each_entry(d, &hci_dev_list, list) {
384 if (hci_dev_test_flag(d, HCI_SETUP) ||
385 hci_dev_test_flag(d, HCI_CONFIG) ||
386 hci_dev_test_flag(d, HCI_USER_CHANNEL))
387 continue;
388
389 /* Devices marked as raw-only are neither configured
390 * nor unconfigured controllers.
391 */
392 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
393 continue;
394
395 if (d->dev_type == HCI_PRIMARY &&
396 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
397 rp->index[count++] = cpu_to_le16(d->id);
398 BT_DBG("Added hci%u", d->id);
399 }
400 }
401
402 rp->num_controllers = cpu_to_le16(count);
403 rp_len = sizeof(*rp) + (2 * count);
404
405 read_unlock(&hci_dev_list_lock);
406
407 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
408 0, rp, rp_len);
409
410 kfree(rp);
411
412 return err;
413}
414
415static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
416 void *data, u16 data_len)
417{
418 struct mgmt_rp_read_unconf_index_list *rp;
419 struct hci_dev *d;
420 size_t rp_len;
421 u16 count;
422 int err;
423
424 BT_DBG("sock %p", sk);
425
426 read_lock(&hci_dev_list_lock);
427
428 count = 0;
429 list_for_each_entry(d, &hci_dev_list, list) {
430 if (d->dev_type == HCI_PRIMARY &&
431 hci_dev_test_flag(d, HCI_UNCONFIGURED))
432 count++;
433 }
434
435 rp_len = sizeof(*rp) + (2 * count);
436 rp = kmalloc(rp_len, GFP_ATOMIC);
437 if (!rp) {
438 read_unlock(&hci_dev_list_lock);
439 return -ENOMEM;
440 }
441
442 count = 0;
443 list_for_each_entry(d, &hci_dev_list, list) {
444 if (hci_dev_test_flag(d, HCI_SETUP) ||
445 hci_dev_test_flag(d, HCI_CONFIG) ||
446 hci_dev_test_flag(d, HCI_USER_CHANNEL))
447 continue;
448
449 /* Devices marked as raw-only are neither configured
450 * nor unconfigured controllers.
451 */
452 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
453 continue;
454
455 if (d->dev_type == HCI_PRIMARY &&
456 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
457 rp->index[count++] = cpu_to_le16(d->id);
458 BT_DBG("Added hci%u", d->id);
459 }
460 }
461
462 rp->num_controllers = cpu_to_le16(count);
463 rp_len = sizeof(*rp) + (2 * count);
464
465 read_unlock(&hci_dev_list_lock);
466
467 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
468 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
469
470 kfree(rp);
471
472 return err;
473}
474
475static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
476 void *data, u16 data_len)
477{
478 struct mgmt_rp_read_ext_index_list *rp;
479 struct hci_dev *d;
480 u16 count;
481 int err;
482
483 BT_DBG("sock %p", sk);
484
485 read_lock(&hci_dev_list_lock);
486
487 count = 0;
488 list_for_each_entry(d, &hci_dev_list, list) {
489 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
490 count++;
491 }
492
493 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
494 if (!rp) {
495 read_unlock(&hci_dev_list_lock);
496 return -ENOMEM;
497 }
498
499 count = 0;
500 list_for_each_entry(d, &hci_dev_list, list) {
501 if (hci_dev_test_flag(d, HCI_SETUP) ||
502 hci_dev_test_flag(d, HCI_CONFIG) ||
503 hci_dev_test_flag(d, HCI_USER_CHANNEL))
504 continue;
505
506 /* Devices marked as raw-only are neither configured
507 * nor unconfigured controllers.
508 */
509 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
510 continue;
511
512 if (d->dev_type == HCI_PRIMARY) {
513 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
514 rp->entry[count].type = 0x01;
515 else
516 rp->entry[count].type = 0x00;
517 } else if (d->dev_type == HCI_AMP) {
518 rp->entry[count].type = 0x02;
519 } else {
520 continue;
521 }
522
523 rp->entry[count].bus = d->bus;
524 rp->entry[count++].index = cpu_to_le16(d->id);
525 BT_DBG("Added hci%u", d->id);
526 }
527
528 rp->num_controllers = cpu_to_le16(count);
529
530 read_unlock(&hci_dev_list_lock);
531
532 /* If this command is called at least once, then all the
533 * default index and unconfigured index events are disabled
534 * and from now on only extended index events are used.
535 */
536 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
537 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
538 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
539
540 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
541 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
542 struct_size(rp, entry, count));
543
544 kfree(rp);
545
546 return err;
547}
548
549static bool is_configured(struct hci_dev *hdev)
550{
551 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
552 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
553 return false;
554
555 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
556 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
557 !bacmp(&hdev->public_addr, BDADDR_ANY))
558 return false;
559
560 return true;
561}
562
563static __le32 get_missing_options(struct hci_dev *hdev)
564{
565 u32 options = 0;
566
567 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
568 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
569 options |= MGMT_OPTION_EXTERNAL_CONFIG;
570
571 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
572 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
573 !bacmp(&hdev->public_addr, BDADDR_ANY))
574 options |= MGMT_OPTION_PUBLIC_ADDRESS;
575
576 return cpu_to_le32(options);
577}
578
579static int new_options(struct hci_dev *hdev, struct sock *skip)
580{
581 __le32 options = get_missing_options(hdev);
582
583 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
584 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
585}
586
587static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
588{
589 __le32 options = get_missing_options(hdev);
590
591 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
592 sizeof(options));
593}
594
595static int read_config_info(struct sock *sk, struct hci_dev *hdev,
596 void *data, u16 data_len)
597{
598 struct mgmt_rp_read_config_info rp;
599 u32 options = 0;
600
601 BT_DBG("sock %p %s", sk, hdev->name);
602
603 hci_dev_lock(hdev);
604
605 memset(&rp, 0, sizeof(rp));
606 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
607
608 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
609 options |= MGMT_OPTION_EXTERNAL_CONFIG;
610
611 if (hdev->set_bdaddr)
612 options |= MGMT_OPTION_PUBLIC_ADDRESS;
613
614 rp.supported_options = cpu_to_le32(options);
615 rp.missing_options = get_missing_options(hdev);
616
617 hci_dev_unlock(hdev);
618
619 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
620 &rp, sizeof(rp));
621}
622
623static u32 get_supported_phys(struct hci_dev *hdev)
624{
625 u32 supported_phys = 0;
626
627 if (lmp_bredr_capable(hdev)) {
628 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
629
630 if (hdev->features[0][0] & LMP_3SLOT)
631 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
632
633 if (hdev->features[0][0] & LMP_5SLOT)
634 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
635
636 if (lmp_edr_2m_capable(hdev)) {
637 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
638
639 if (lmp_edr_3slot_capable(hdev))
640 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
641
642 if (lmp_edr_5slot_capable(hdev))
643 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
644
645 if (lmp_edr_3m_capable(hdev)) {
646 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
647
648 if (lmp_edr_3slot_capable(hdev))
649 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
650
651 if (lmp_edr_5slot_capable(hdev))
652 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
653 }
654 }
655 }
656
657 if (lmp_le_capable(hdev)) {
658 supported_phys |= MGMT_PHY_LE_1M_TX;
659 supported_phys |= MGMT_PHY_LE_1M_RX;
660
661 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
662 supported_phys |= MGMT_PHY_LE_2M_TX;
663 supported_phys |= MGMT_PHY_LE_2M_RX;
664 }
665
666 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
667 supported_phys |= MGMT_PHY_LE_CODED_TX;
668 supported_phys |= MGMT_PHY_LE_CODED_RX;
669 }
670 }
671
672 return supported_phys;
673}
674
675static u32 get_selected_phys(struct hci_dev *hdev)
676{
677 u32 selected_phys = 0;
678
679 if (lmp_bredr_capable(hdev)) {
680 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
681
682 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
683 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
684
685 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
686 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
687
688 if (lmp_edr_2m_capable(hdev)) {
689 if (!(hdev->pkt_type & HCI_2DH1))
690 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
691
692 if (lmp_edr_3slot_capable(hdev) &&
693 !(hdev->pkt_type & HCI_2DH3))
694 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
695
696 if (lmp_edr_5slot_capable(hdev) &&
697 !(hdev->pkt_type & HCI_2DH5))
698 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
699
700 if (lmp_edr_3m_capable(hdev)) {
701 if (!(hdev->pkt_type & HCI_3DH1))
702 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
703
704 if (lmp_edr_3slot_capable(hdev) &&
705 !(hdev->pkt_type & HCI_3DH3))
706 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
707
708 if (lmp_edr_5slot_capable(hdev) &&
709 !(hdev->pkt_type & HCI_3DH5))
710 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
711 }
712 }
713 }
714
715 if (lmp_le_capable(hdev)) {
716 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
717 selected_phys |= MGMT_PHY_LE_1M_TX;
718
719 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
720 selected_phys |= MGMT_PHY_LE_1M_RX;
721
722 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
723 selected_phys |= MGMT_PHY_LE_2M_TX;
724
725 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
726 selected_phys |= MGMT_PHY_LE_2M_RX;
727
728 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
729 selected_phys |= MGMT_PHY_LE_CODED_TX;
730
731 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
732 selected_phys |= MGMT_PHY_LE_CODED_RX;
733 }
734
735 return selected_phys;
736}
737
738static u32 get_configurable_phys(struct hci_dev *hdev)
739{
740 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
741 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
742}
743
744static u32 get_supported_settings(struct hci_dev *hdev)
745{
746 u32 settings = 0;
747
748 settings |= MGMT_SETTING_POWERED;
749 settings |= MGMT_SETTING_BONDABLE;
750 settings |= MGMT_SETTING_DEBUG_KEYS;
751 settings |= MGMT_SETTING_CONNECTABLE;
752 settings |= MGMT_SETTING_DISCOVERABLE;
753
754 if (lmp_bredr_capable(hdev)) {
755 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
756 settings |= MGMT_SETTING_FAST_CONNECTABLE;
757 settings |= MGMT_SETTING_BREDR;
758 settings |= MGMT_SETTING_LINK_SECURITY;
759
760 if (lmp_ssp_capable(hdev)) {
761 settings |= MGMT_SETTING_SSP;
762 if (IS_ENABLED(CONFIG_BT_HS))
763 settings |= MGMT_SETTING_HS;
764 }
765
766 if (lmp_sc_capable(hdev))
767 settings |= MGMT_SETTING_SECURE_CONN;
768 }
769
770 if (lmp_le_capable(hdev)) {
771 settings |= MGMT_SETTING_LE;
772 settings |= MGMT_SETTING_ADVERTISING;
773 settings |= MGMT_SETTING_SECURE_CONN;
774 settings |= MGMT_SETTING_PRIVACY;
775 settings |= MGMT_SETTING_STATIC_ADDRESS;
776 }
777
778 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
779 hdev->set_bdaddr)
780 settings |= MGMT_SETTING_CONFIGURATION;
781
782 settings |= MGMT_SETTING_PHY_CONFIGURATION;
783
784 return settings;
785}
786
787static u32 get_current_settings(struct hci_dev *hdev)
788{
789 u32 settings = 0;
790
791 if (hdev_is_powered(hdev))
792 settings |= MGMT_SETTING_POWERED;
793
794 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
795 settings |= MGMT_SETTING_CONNECTABLE;
796
797 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
798 settings |= MGMT_SETTING_FAST_CONNECTABLE;
799
800 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
801 settings |= MGMT_SETTING_DISCOVERABLE;
802
803 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
804 settings |= MGMT_SETTING_BONDABLE;
805
806 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
807 settings |= MGMT_SETTING_BREDR;
808
809 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
810 settings |= MGMT_SETTING_LE;
811
812 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
813 settings |= MGMT_SETTING_LINK_SECURITY;
814
815 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
816 settings |= MGMT_SETTING_SSP;
817
818 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
819 settings |= MGMT_SETTING_HS;
820
821 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
822 settings |= MGMT_SETTING_ADVERTISING;
823
824 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
825 settings |= MGMT_SETTING_SECURE_CONN;
826
827 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
828 settings |= MGMT_SETTING_DEBUG_KEYS;
829
830 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
831 settings |= MGMT_SETTING_PRIVACY;
832
833 /* The current setting for static address has two purposes. The
834 * first is to indicate if the static address will be used and
835 * the second is to indicate if it is actually set.
836 *
837 * This means if the static address is not configured, this flag
838 * will never be set. If the address is configured, then if the
839 * address is actually used decides if the flag is set or not.
840 *
841 * For single mode LE only controllers and dual-mode controllers
842 * with BR/EDR disabled, the existence of the static address will
843 * be evaluated.
844 */
845 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
846 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
847 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
848 if (bacmp(&hdev->static_addr, BDADDR_ANY))
849 settings |= MGMT_SETTING_STATIC_ADDRESS;
850 }
851
852 return settings;
853}
854
855static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
856{
857 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
858}
859
860static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
861 struct hci_dev *hdev,
862 const void *data)
863{
864 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
865}
866
867u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
868{
869 struct mgmt_pending_cmd *cmd;
870
871 /* If there's a pending mgmt command the flags will not yet have
872 * their final values, so check for this first.
873 */
874 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
875 if (cmd) {
876 struct mgmt_mode *cp = cmd->param;
877 if (cp->val == 0x01)
878 return LE_AD_GENERAL;
879 else if (cp->val == 0x02)
880 return LE_AD_LIMITED;
881 } else {
882 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
883 return LE_AD_LIMITED;
884 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
885 return LE_AD_GENERAL;
886 }
887
888 return 0;
889}
890
891bool mgmt_get_connectable(struct hci_dev *hdev)
892{
893 struct mgmt_pending_cmd *cmd;
894
895 /* If there's a pending mgmt command the flag will not yet have
896 * it's final value, so check for this first.
897 */
898 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
899 if (cmd) {
900 struct mgmt_mode *cp = cmd->param;
901
902 return cp->val;
903 }
904
905 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
906}
907
908static void service_cache_off(struct work_struct *work)
909{
910 struct hci_dev *hdev = container_of(work, struct hci_dev,
911 service_cache.work);
912 struct hci_request req;
913
914 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
915 return;
916
917 hci_req_init(&req, hdev);
918
919 hci_dev_lock(hdev);
920
921 __hci_req_update_eir(&req);
922 __hci_req_update_class(&req);
923
924 hci_dev_unlock(hdev);
925
926 hci_req_run(&req, NULL);
927}
928
929static void rpa_expired(struct work_struct *work)
930{
931 struct hci_dev *hdev = container_of(work, struct hci_dev,
932 rpa_expired.work);
933 struct hci_request req;
934
935 BT_DBG("");
936
937 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
938
939 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
940 return;
941
942 /* The generation of a new RPA and programming it into the
943 * controller happens in the hci_req_enable_advertising()
944 * function.
945 */
946 hci_req_init(&req, hdev);
947 if (ext_adv_capable(hdev))
948 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
949 else
950 __hci_req_enable_advertising(&req);
951 hci_req_run(&req, NULL);
952}
953
954static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
955{
956 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
957 return;
958
959 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
960 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
961
962 /* Non-mgmt controlled devices get this bit set
963 * implicitly so that pairing works for them, however
964 * for mgmt we require user-space to explicitly enable
965 * it
966 */
967 hci_dev_clear_flag(hdev, HCI_BONDABLE);
968}
969
970static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
971 void *data, u16 data_len)
972{
973 struct mgmt_rp_read_info rp;
974
975 BT_DBG("sock %p %s", sk, hdev->name);
976
977 hci_dev_lock(hdev);
978
979 memset(&rp, 0, sizeof(rp));
980
981 bacpy(&rp.bdaddr, &hdev->bdaddr);
982
983 rp.version = hdev->hci_ver;
984 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
985
986 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
987 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
988
989 memcpy(rp.dev_class, hdev->dev_class, 3);
990
991 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
992 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
993
994 hci_dev_unlock(hdev);
995
996 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
997 sizeof(rp));
998}
999
1000static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1001{
1002 u16 eir_len = 0;
1003 size_t name_len;
1004
1005 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1006 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1007 hdev->dev_class, 3);
1008
1009 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1010 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1011 hdev->appearance);
1012
1013 name_len = strlen(hdev->dev_name);
1014 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1015 hdev->dev_name, name_len);
1016
1017 name_len = strlen(hdev->short_name);
1018 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1019 hdev->short_name, name_len);
1020
1021 return eir_len;
1022}
1023
1024static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1025 void *data, u16 data_len)
1026{
1027 char buf[512];
1028 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1029 u16 eir_len;
1030
1031 BT_DBG("sock %p %s", sk, hdev->name);
1032
1033 memset(&buf, 0, sizeof(buf));
1034
1035 hci_dev_lock(hdev);
1036
1037 bacpy(&rp->bdaddr, &hdev->bdaddr);
1038
1039 rp->version = hdev->hci_ver;
1040 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1041
1042 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1043 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1044
1045
1046 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1047 rp->eir_len = cpu_to_le16(eir_len);
1048
1049 hci_dev_unlock(hdev);
1050
1051 /* If this command is called at least once, then the events
1052 * for class of device and local name changes are disabled
1053 * and only the new extended controller information event
1054 * is used.
1055 */
1056 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1057 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1058 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1059
1060 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1061 sizeof(*rp) + eir_len);
1062}
1063
1064static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1065{
1066 char buf[512];
1067 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1068 u16 eir_len;
1069
1070 memset(buf, 0, sizeof(buf));
1071
1072 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1073 ev->eir_len = cpu_to_le16(eir_len);
1074
1075 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1076 sizeof(*ev) + eir_len,
1077 HCI_MGMT_EXT_INFO_EVENTS, skip);
1078}
1079
1080static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1081{
1082 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1083
1084 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1085 sizeof(settings));
1086}
1087
1088static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1089{
1090 BT_DBG("%s status 0x%02x", hdev->name, status);
1091
1092 if (hci_conn_count(hdev) == 0) {
1093 cancel_delayed_work(&hdev->power_off);
1094 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1095 }
1096}
1097
1098void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1099{
1100 struct mgmt_ev_advertising_added ev;
1101
1102 ev.instance = instance;
1103
1104 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1105}
1106
1107void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1108 u8 instance)
1109{
1110 struct mgmt_ev_advertising_removed ev;
1111
1112 ev.instance = instance;
1113
1114 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1115}
1116
1117static void cancel_adv_timeout(struct hci_dev *hdev)
1118{
1119 if (hdev->adv_instance_timeout) {
1120 hdev->adv_instance_timeout = 0;
1121 cancel_delayed_work(&hdev->adv_instance_expire);
1122 }
1123}
1124
/* Build and run an HCI request that quiesces the controller prior to
 * power off: disable page/inquiry scan, remove advertising, stop
 * discovery and abort every existing connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed to
 * be queued (the caller treats that as "already clean").
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is active. */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Instance 0x00 means "all advertising instances". */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1158
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 *
 * The operation completes asynchronously: a pending command entry is
 * queued and the final response is sent once the power on/off work
 * has run.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid mode values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED operation may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1213
1214static int new_settings(struct hci_dev *hdev, struct sock *skip)
1215{
1216 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1217
1218 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1219 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1220}
1221
/* Public wrapper: broadcast the current settings to every subscribed
 * mgmt socket (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1226
/* Iteration context for mgmt_pending_foreach() callbacks: records the
 * first responded socket (so follow-up events can skip it), the device
 * being worked on and a status to report.
 */
struct cmd_lookup {
	struct sock *sk;	/* set by the callback; reference is held */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1232
1233static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1234{
1235 struct cmd_lookup *match = data;
1236
1237 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1238
1239 list_del(&cmd->list);
1240
1241 if (match->sk == NULL) {
1242 match->sk = cmd->sk;
1243 sock_hold(match->sk);
1244 }
1245
1246 mgmt_pending_free(cmd);
1247}
1248
1249static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1250{
1251 u8 *status = data;
1252
1253 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1254 mgmt_pending_remove(cmd);
1255}
1256
1257static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1258{
1259 if (cmd->cmd_complete) {
1260 u8 *status = data;
1261
1262 cmd->cmd_complete(cmd, *status);
1263 mgmt_pending_remove(cmd);
1264
1265 return;
1266 }
1267
1268 cmd_status_rsp(cmd, data);
1269}
1270
/* Default cmd_complete handler: echo the original command parameters
 * back together with the given status.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1276
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: only that address portion is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1282
1283static u8 mgmt_bredr_support(struct hci_dev *hdev)
1284{
1285 if (!lmp_bredr_capable(hdev))
1286 return MGMT_STATUS_NOT_SUPPORTED;
1287 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1288 return MGMT_STATUS_REJECTED;
1289 else
1290 return MGMT_STATUS_SUCCESS;
1291}
1292
1293static u8 mgmt_le_support(struct hci_dev *hdev)
1294{
1295 if (!lmp_le_capable(hdev))
1296 return MGMT_STATUS_NOT_SUPPORTED;
1297 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1298 return MGMT_STATUS_REJECTED;
1299 else
1300 return MGMT_STATUS_SUCCESS;
1301}
1302
/* Called when the HCI side of a SET_DISCOVERABLE operation finishes:
 * answer the pending command and, on success, arm the timeout that
 * disables discoverable mode again.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Don't leave the limited flag set after a failure. */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the delayed work that turns discoverable mode back off. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1337
/* Handler for MGMT_OP_SET_DISCOVERABLE: disable (0x00), enable general
 * (0x01) or enable limited (0x02) discoverable mode, optionally with a
 * timeout after which it is switched off again. The HCI side runs
 * asynchronously via the discoverable_update work.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only ever fire on a powered controller. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable mode requires connectable mode to be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only toggle the stored flag, no HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1461
/* Called when the HCI side of a SET_CONNECTABLE operation finishes:
 * answer the pending command with either an error status or the new
 * settings, then drop the pending entry.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1489
1490static int set_connectable_update_settings(struct hci_dev *hdev,
1491 struct sock *sk, u8 val)
1492{
1493 bool changed = false;
1494 int err;
1495
1496 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1497 changed = true;
1498
1499 if (val) {
1500 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1501 } else {
1502 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1503 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1504 }
1505
1506 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1507 if (err < 0)
1508 return err;
1509
1510 if (changed) {
1511 hci_req_update_scan(hdev);
1512 hci_update_background_scan(hdev);
1513 return new_settings(hdev, sk);
1514 }
1515
1516 return 0;
1517}
1518
/* Handler for MGMT_OP_SET_CONNECTABLE: enable or disable connectable
 * mode. The HCI side runs asynchronously via the connectable_update
 * work; when powered off only the stored settings are toggled.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode,
		 * so cancel its timeout and clear both flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1575
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag. A
 * purely host-side setting, so it completes synchronously.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test_and_set/clear report whether the flag actually changed. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1618
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR link
 * level security (HCI authentication). When powered, the change is
 * applied via HCI_OP_WRITE_AUTH_ENABLE and completes asynchronously.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the stored flag. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: no HCI traffic. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1687
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple
 * Pairing. Disabling SSP also disables High Speed support, since HS
 * depends on it. When powered, the change goes through
 * HCI_OP_WRITE_SSP_MODE and completes asynchronously.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the stored flags. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* SSP off implies HS off; report "changed" if
			 * either flag was actually cleared.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was in use. */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1768
/* Handler for MGMT_OP_SET_HS: enable or disable High Speed support.
 * A host-side setting (requires SSP), so it completes synchronously;
 * disabling is rejected while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* High Speed requires Secure Simple Pairing to be enabled. */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Don't race with a pending SSP change, which may toggle HS too. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1829
/* HCI request completion callback for SET_LE: respond to all pending
 * SET_LE commands and, when LE ended up enabled, refresh the default
 * advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending SET_LE command with the HCI error. */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1876
/* Handler for MGMT_OP_SET_LE: enable or disable Low Energy support.
 * When powered and the host LE state actually changes, the new value
 * is written via HCI_OP_WRITE_LE_HOST_SUPPORTED and the command
 * completes asynchronously in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances (0x00 = all). */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or host LE state already matches: flags only. */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before turning LE off. */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1983
1984/* This is a helper function to test for pending mgmt commands that can
1985 * cause CoD or EIR HCI commands. We can only allow one such pending
1986 * mgmt command at a time since otherwise we cannot easily track what
1987 * the current values are, will be, and based on that calculate if a new
1988 * HCI command needs to be sent and if yes with what value.
1989 */
1990static bool pending_eir_or_class(struct hci_dev *hdev)
1991{
1992 struct mgmt_pending_cmd *cmd;
1993
1994 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1995 switch (cmd->opcode) {
1996 case MGMT_OP_ADD_UUID:
1997 case MGMT_OP_REMOVE_UUID:
1998 case MGMT_OP_SET_DEV_CLASS:
1999 case MGMT_OP_SET_POWERED:
2000 return true;
2001 }
2002 }
2003
2004 return false;
2005}
2006
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are derived from it.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2011
2012static u8 get_uuid_size(const u8 *uuid)
2013{
2014 u32 val;
2015
2016 if (memcmp(uuid, bluetooth_base_uuid, 12))
2017 return 128;
2018
2019 val = get_unaligned_le32(&uuid[12]);
2020 if (val > 0xffff)
2021 return 32;
2022
2023 return 16;
2024}
2025
2026static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2027{
2028 struct mgmt_pending_cmd *cmd;
2029
2030 hci_dev_lock(hdev);
2031
2032 cmd = pending_find(mgmt_op, hdev);
2033 if (!cmd)
2034 goto unlock;
2035
2036 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2037 mgmt_status(status), hdev->dev_class, 3);
2038
2039 mgmt_pending_remove(cmd);
2040
2041unlock:
2042 hci_dev_unlock(hdev);
2043}
2044
/* HCI request completion callback for ADD_UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2051
/* Handler for MGMT_OP_ADD_UUID: register a service UUID and refresh
 * the class of device and EIR data accordingly. Completes immediately
 * when no HCI commands were needed (-ENODATA), otherwise
 * asynchronously via add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI traffic was needed; complete right away. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2109
2110static bool enable_service_cache(struct hci_dev *hdev)
2111{
2112 if (!hdev_is_powered(hdev))
2113 return false;
2114
2115 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2116 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2117 CACHE_TIMEOUT);
2118 return true;
2119 }
2120
2121 return false;
2122}
2123
/* HCI request completion callback for REMOVE_UUID. */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2130
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID, or all of
 * them when the all-zero UUID is given, and refresh the class of
 * device and EIR data. Completes immediately when no HCI commands were
 * needed (-ENODATA), otherwise asynchronously via
 * remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything". */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache could be (re)armed, the pending
		 * HCI updates are deferred and we can reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI traffic was needed; complete right away. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2209
/* HCI request completion callback for SET_DEV_CLASS. */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2216
/* Handler for MGMT_OP_SET_DEV_CLASS: update the major/minor class of
 * device. Flushes a pending service cache first so the EIR data is in
 * sync. Completes immediately when powered off or no HCI commands were
 * needed (-ENODATA), otherwise asynchronously via set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved. */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while cancelling so the work can finish
		 * if it is already running.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI traffic was needed; complete right away. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2287
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the list supplied by userspace and update the keep-debug-keys
 * policy flag. All keys are validated before any state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX so the
	 * expected_len computation cannot wrap.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	/* Link keys are a BR/EDR concept only */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Command length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry up front so the existing keys are only
	 * cleared once the whole request is known to be well-formed.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* 0x08 is the highest defined link key type */
		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners only if the debug-keys setting actually flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2368
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for the given address to all
 * mgmt listeners except @skip_sk (typically the requesting socket,
 * which gets a command response instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2380
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored pairing material for the
 * given address (link key for BR/EDR; SMP LTK/IRK for LE) and optionally
 * terminate an existing connection to the device. If a disconnection is
 * initiated the command response is deferred until the link goes down.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* Responses always echo the target address back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored key means the device was never paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		/* No link to tear down: respond and emit the unpaired
		 * event immediately (skipping the requesting socket).
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the response until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2508
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address. The response is deferred until the disconnect
 * completes (via the pending command's cmd_complete callback).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect operation may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2574
2575static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2576{
2577 switch (link_type) {
2578 case LE_LINK:
2579 switch (addr_type) {
2580 case ADDR_LE_DEV_PUBLIC:
2581 return BDADDR_LE_PUBLIC;
2582
2583 default:
2584 /* Fallback to LE Random address type */
2585 return BDADDR_LE_RANDOM;
2586 }
2587
2588 default:
2589 /* Fallback to BR/EDR type */
2590 return BDADDR_BREDR;
2591 }
2592}
2593
2594static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2595 u16 data_len)
2596{
2597 struct mgmt_rp_get_connections *rp;
2598 struct hci_conn *c;
2599 int err;
2600 u16 i;
2601
2602 BT_DBG("");
2603
2604 hci_dev_lock(hdev);
2605
2606 if (!hdev_is_powered(hdev)) {
2607 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2608 MGMT_STATUS_NOT_POWERED);
2609 goto unlock;
2610 }
2611
2612 i = 0;
2613 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2614 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2615 i++;
2616 }
2617
2618 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2619 if (!rp) {
2620 err = -ENOMEM;
2621 goto unlock;
2622 }
2623
2624 i = 0;
2625 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2626 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2627 continue;
2628 bacpy(&rp->addr[i].bdaddr, &c->dst);
2629 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2630 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2631 continue;
2632 i++;
2633 }
2634
2635 rp->conn_count = cpu_to_le16(i);
2636
2637 /* Recalculate length in case of filtered SCO connections, etc */
2638 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2639 struct_size(rp, addr, i));
2640
2641 kfree(rp);
2642
2643unlock:
2644 hci_dev_unlock(hdev);
2645 return err;
2646}
2647
/* Queue an HCI PIN Code Negative Reply for the given address and
 * register a pending mgmt command so the eventual HCI completion
 * produces the mgmt response. Caller holds hci_dev_lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command carries only the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2668
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller. If the connection requires a 16-byte PIN (high
 * security) and the supplied one is shorter, a negative reply is sent
 * instead and the command fails with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only happens over a BR/EDR ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security demands a full 16-byte PIN; reject anything less */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		/* Only report INVALID_PARAMS if the negative reply was
		 * queued successfully; otherwise propagate its error.
		 */
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2730
2731static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2732 u16 len)
2733{
2734 struct mgmt_cp_set_io_capability *cp = data;
2735
2736 BT_DBG("");
2737
2738 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2740 MGMT_STATUS_INVALID_PARAMS);
2741
2742 hci_dev_lock(hdev);
2743
2744 hdev->io_capability = cp->io_capability;
2745
2746 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2747 hdev->io_capability);
2748
2749 hci_dev_unlock(hdev);
2750
2751 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2752 NULL, 0);
2753}
2754
2755static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2756{
2757 struct hci_dev *hdev = conn->hdev;
2758 struct mgmt_pending_cmd *cmd;
2759
2760 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2761 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2762 continue;
2763
2764 if (cmd->user_data != conn)
2765 continue;
2766
2767 return cmd;
2768 }
2769
2770 return NULL;
2771}
2772
/* Finish a Pair Device operation: send the mgmt response with the given
 * status, detach the pairing callbacks from the connection and release
 * the references taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() done when cmd->user_data was set */
	hci_conn_put(conn);

	return err;
}
2801
2802void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2803{
2804 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2805 struct mgmt_pending_cmd *cmd;
2806
2807 cmd = find_pairing(conn);
2808 if (cmd) {
2809 cmd->cmd_complete(cmd, status);
2810 mgmt_pending_remove(cmd);
2811 }
2812}
2813
/* Connection callback for BR/EDR pairing: any connect/security/
 * disconnect event finishes the pending Pair Device command with the
 * (translated) HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2829
/* Connection callback for LE pairing: unlike BR/EDR, success events are
 * ignored here because the real completion comes from the SMP layer via
 * mgmt_smp_complete(); only failures terminate the command early.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* A successful event is not proof the pairing finished; wait for
	 * the SMP layer instead.
	 */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2848
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with the
 * given BR/EDR or LE address. Creates (or reuses) a connection, hooks
 * pairing callbacks into it and defers the mgmt response until the
 * pairing finishes or fails.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* Responses echo the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
					   HCI_LE_CONN_TIMEOUT);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map connection errors to mgmt status codes */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means some other pairing/connect
	 * operation already owns this connection's callbacks.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Hold a reference for the pending command; released by
	 * pairing_complete().
	 */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure: complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2983
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the in-progress Pair Device
 * command if its target matches the supplied address, completing it
 * with MGMT_STATUS_CANCELLED.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There can be at most one pending Pair Device command */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* Address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3026
/* Common handler for user pairing responses (PIN/confirm/passkey, both
 * positive and negative). For LE connections the reply is routed
 * through the SMP layer and completes synchronously; for BR/EDR the
 * given HCI command is sent and the response deferred.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: hand the reply to SMP and answer immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies carry only the remote address */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3097
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to the
 * common user-pairing response path (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3109
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the command length and
 * delegate to the common user-pairing response path.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	/* The command carries only the address; reject trailing data */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3125
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * the common user-pairing response path.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3137
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper forwarding the
 * user-entered passkey to the common user-pairing response path.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3149
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * the common user-pairing response path.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3161
/* Restart the advertising instance rotation if the current instance
 * uses any of the given @flags (e.g. after the local name or appearance
 * changed, so the advertised data is refreshed). Caller holds
 * hci_dev_lock.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3190
/* HCI request completion callback for Set Local Name: send the deferred
 * mgmt response and, on success while advertising, refresh advertising
 * instances that include the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Advertising data may embed the name; rotate instances */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3222
/* MGMT_OP_SET_LOCAL_NAME handler: store the new local and short names.
 * If powered, push the name to the controller (EIR for BR/EDR, scan
 * response for LE advertising) and defer the response to
 * set_name_complete(); otherwise just store and reply immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just remember the name and notify listeners */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3292
3293static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3294 u16 len)
3295{
3296 struct mgmt_cp_set_appearance *cp = data;
3297 u16 apperance;
3298 int err;
3299
3300 BT_DBG("");
3301
3302 if (!lmp_le_capable(hdev))
3303 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3304 MGMT_STATUS_NOT_SUPPORTED);
3305
3306 apperance = le16_to_cpu(cp->appearance);
3307
3308 hci_dev_lock(hdev);
3309
3310 if (hdev->appearance != apperance) {
3311 hdev->appearance = apperance;
3312
3313 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3314 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3315
3316 ext_info_changed(hdev, sk);
3317 }
3318
3319 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3320 0);
3321
3322 hci_dev_unlock(hdev);
3323
3324 return err;
3325}
3326
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected
 * and configurable PHYs. (The "confguration" typo in the response
 * struct tag comes from the shared mgmt.h header and cannot be fixed
 * here.)
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_confguration rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3347
/* Broadcast an MGMT_EV_PHY_CONFIGURATION_CHANGED event carrying the
 * currently selected PHYs to all mgmt listeners except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3359
/* HCI request completion callback for LE Set Default PHY: send the
 * deferred Set PHY Configuration response and broadcast the changed
 * event on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone except the originating socket */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3390
3391static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3392 void *data, u16 len)
3393{
3394 struct mgmt_cp_set_phy_confguration *cp = data;
3395 struct hci_cp_le_set_default_phy cp_phy;
3396 struct mgmt_pending_cmd *cmd;
3397 struct hci_request req;
3398 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3399 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3400 bool changed = false;
3401 int err;
3402
3403 BT_DBG("sock %p %s", sk, hdev->name);
3404
3405 configurable_phys = get_configurable_phys(hdev);
3406 supported_phys = get_supported_phys(hdev);
3407 selected_phys = __le32_to_cpu(cp->selected_phys);
3408
3409 if (selected_phys & ~supported_phys)
3410 return mgmt_cmd_status(sk, hdev->id,
3411 MGMT_OP_SET_PHY_CONFIGURATION,
3412 MGMT_STATUS_INVALID_PARAMS);
3413
3414 unconfigure_phys = supported_phys & ~configurable_phys;
3415
3416 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3417 return mgmt_cmd_status(sk, hdev->id,
3418 MGMT_OP_SET_PHY_CONFIGURATION,
3419 MGMT_STATUS_INVALID_PARAMS);
3420
3421 if (selected_phys == get_selected_phys(hdev))
3422 return mgmt_cmd_complete(sk, hdev->id,
3423 MGMT_OP_SET_PHY_CONFIGURATION,
3424 0, NULL, 0);
3425
3426 hci_dev_lock(hdev);
3427
3428 if (!hdev_is_powered(hdev)) {
3429 err = mgmt_cmd_status(sk, hdev->id,
3430 MGMT_OP_SET_PHY_CONFIGURATION,
3431 MGMT_STATUS_REJECTED);
3432 goto unlock;
3433 }
3434
3435 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3436 err = mgmt_cmd_status(sk, hdev->id,
3437 MGMT_OP_SET_PHY_CONFIGURATION,
3438 MGMT_STATUS_BUSY);
3439 goto unlock;
3440 }
3441
3442 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3443 pkt_type |= (HCI_DH3 | HCI_DM3);
3444 else
3445 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3446
3447 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3448 pkt_type |= (HCI_DH5 | HCI_DM5);
3449 else
3450 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3451
3452 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3453 pkt_type &= ~HCI_2DH1;
3454 else
3455 pkt_type |= HCI_2DH1;
3456
3457 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3458 pkt_type &= ~HCI_2DH3;
3459 else
3460 pkt_type |= HCI_2DH3;
3461
3462 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3463 pkt_type &= ~HCI_2DH5;
3464 else
3465 pkt_type |= HCI_2DH5;
3466
3467 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3468 pkt_type &= ~HCI_3DH1;
3469 else
3470 pkt_type |= HCI_3DH1;
3471
3472 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3473 pkt_type &= ~HCI_3DH3;
3474 else
3475 pkt_type |= HCI_3DH3;
3476
3477 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3478 pkt_type &= ~HCI_3DH5;
3479 else
3480 pkt_type |= HCI_3DH5;
3481
3482 if (pkt_type != hdev->pkt_type) {
3483 hdev->pkt_type = pkt_type;
3484 changed = true;
3485 }
3486
3487 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3488 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3489 if (changed)
3490 mgmt_phy_configuration_changed(hdev, sk);
3491
3492 err = mgmt_cmd_complete(sk, hdev->id,
3493 MGMT_OP_SET_PHY_CONFIGURATION,
3494 0, NULL, 0);
3495
3496 goto unlock;
3497 }
3498
3499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3500 len);
3501 if (!cmd) {
3502 err = -ENOMEM;
3503 goto unlock;
3504 }
3505
3506 hci_req_init(&req, hdev);
3507
3508 memset(&cp_phy, 0, sizeof(cp_phy));
3509
3510 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3511 cp_phy.all_phys |= 0x01;
3512
3513 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3514 cp_phy.all_phys |= 0x02;
3515
3516 if (selected_phys & MGMT_PHY_LE_1M_TX)
3517 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3518
3519 if (selected_phys & MGMT_PHY_LE_2M_TX)
3520 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3521
3522 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3523 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3524
3525 if (selected_phys & MGMT_PHY_LE_1M_RX)
3526 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3527
3528 if (selected_phys & MGMT_PHY_LE_2M_RX)
3529 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3530
3531 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3532 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3533
3534 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3535
3536 err = hci_req_run_skb(&req, set_default_phy_complete);
3537 if (err < 0)
3538 mgmt_pending_remove(cmd);
3539
3540unlock:
3541 hci_dev_unlock(hdev);
3542
3543 return err;
3544}
3545
/* Completion callback for the Read Local OOB Data HCI request. Translates
 * the controller's reply (legacy or extended variant, selected by @opcode)
 * into a MGMT_OP_READ_LOCAL_OOB_DATA response for the waiting socket and
 * drops the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	/* Nothing to do if the command was already cancelled/completed */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only P-192 hash and randomizer present */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated event from the controller */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the response so the unused P-256 fields are not sent */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values present */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3604
3605static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3606 void *data, u16 data_len)
3607{
3608 struct mgmt_pending_cmd *cmd;
3609 struct hci_request req;
3610 int err;
3611
3612 BT_DBG("%s", hdev->name);
3613
3614 hci_dev_lock(hdev);
3615
3616 if (!hdev_is_powered(hdev)) {
3617 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3618 MGMT_STATUS_NOT_POWERED);
3619 goto unlock;
3620 }
3621
3622 if (!lmp_ssp_capable(hdev)) {
3623 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3624 MGMT_STATUS_NOT_SUPPORTED);
3625 goto unlock;
3626 }
3627
3628 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3629 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3630 MGMT_STATUS_BUSY);
3631 goto unlock;
3632 }
3633
3634 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3635 if (!cmd) {
3636 err = -ENOMEM;
3637 goto unlock;
3638 }
3639
3640 hci_req_init(&req, hdev);
3641
3642 if (bredr_sc_enabled(hdev))
3643 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3644 else
3645 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3646
3647 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3648 if (err < 0)
3649 mgmt_pending_remove(cmd);
3650
3651unlock:
3652 hci_dev_unlock(hdev);
3653 return err;
3654}
3655
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Stores out-of-band pairing
 * data received from a remote device over an external channel. Two
 * command sizes are accepted: the legacy form carrying only the P-192
 * hash/randomizer, and the extended form carrying both P-192 and P-256
 * values; any other length is rejected.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is limited to BR/EDR */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither of the two defined command sizes */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3763
3764static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3765 void *data, u16 len)
3766{
3767 struct mgmt_cp_remove_remote_oob_data *cp = data;
3768 u8 status;
3769 int err;
3770
3771 BT_DBG("%s", hdev->name);
3772
3773 if (cp->addr.type != BDADDR_BREDR)
3774 return mgmt_cmd_complete(sk, hdev->id,
3775 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3776 MGMT_STATUS_INVALID_PARAMS,
3777 &cp->addr, sizeof(cp->addr));
3778
3779 hci_dev_lock(hdev);
3780
3781 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3782 hci_remote_oob_data_clear(hdev);
3783 status = MGMT_STATUS_SUCCESS;
3784 goto done;
3785 }
3786
3787 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3788 if (err < 0)
3789 status = MGMT_STATUS_INVALID_PARAMS;
3790 else
3791 status = MGMT_STATUS_SUCCESS;
3792
3793done:
3794 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3795 status, &cp->addr, sizeof(cp->addr));
3796
3797 hci_dev_unlock(hdev);
3798 return err;
3799}
3800
3801void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3802{
3803 struct mgmt_pending_cmd *cmd;
3804
3805 BT_DBG("status %d", status);
3806
3807 hci_dev_lock(hdev);
3808
3809 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3810 if (!cmd)
3811 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3812
3813 if (!cmd)
3814 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3815
3816 if (cmd) {
3817 cmd->cmd_complete(cmd, mgmt_status(status));
3818 mgmt_pending_remove(cmd);
3819 }
3820
3821 hci_dev_unlock(hdev);
3822}
3823
/* Check whether discovery type @type can be used on @hdev. Returns true
 * when valid; otherwise returns false and stores the mgmt error code to
 * report in *@mgmt_status.
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* Interleaved discovery needs LE support in addition to
		 * the BR/EDR support checked below.
		 */
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		/* fall through */
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
3850
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which). Validates the
 * request, records the discovery parameters and defers the actual HCI
 * work to the request workqueue; the command completes later from
 * mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is
	 * active on the controller.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Kick off the actual discovery work asynchronously */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3911
/* Handler for MGMT_OP_START_DISCOVERY (regular, unlimited discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3918
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY (limited discovery mode). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3926
/* Completion helper for Start Service Discovery: echo back only the
 * first byte of the stored parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3933
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY. Like regular discovery
 * but results are filtered by an RSSI threshold and an optional list of
 * service UUIDs appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound such that sizeof(*cp) + uuid_count * 16 cannot
	 * overflow the u16 expected_len below.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is
	 * active on the controller.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must be exactly the fixed header plus 16 bytes
	 * per UUID.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Take a private copy of the UUID list; freed later via
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Kick off the actual discovery work asynchronously */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4034
4035void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4036{
4037 struct mgmt_pending_cmd *cmd;
4038
4039 BT_DBG("status %d", status);
4040
4041 hci_dev_lock(hdev);
4042
4043 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4044 if (cmd) {
4045 cmd->cmd_complete(cmd, mgmt_status(status));
4046 mgmt_pending_remove(cmd);
4047 }
4048
4049 hci_dev_unlock(hdev);
4050}
4051
4052static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4053 u16 len)
4054{
4055 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4056 struct mgmt_pending_cmd *cmd;
4057 int err;
4058
4059 BT_DBG("%s", hdev->name);
4060
4061 hci_dev_lock(hdev);
4062
4063 if (!hci_discovery_active(hdev)) {
4064 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4065 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4066 sizeof(mgmt_cp->type));
4067 goto unlock;
4068 }
4069
4070 if (hdev->discovery.type != mgmt_cp->type) {
4071 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4072 MGMT_STATUS_INVALID_PARAMS,
4073 &mgmt_cp->type, sizeof(mgmt_cp->type));
4074 goto unlock;
4075 }
4076
4077 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4078 if (!cmd) {
4079 err = -ENOMEM;
4080 goto unlock;
4081 }
4082
4083 cmd->cmd_complete = generic_cmd_complete;
4084
4085 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4086 queue_work(hdev->req_workqueue, &hdev->discov_update);
4087 err = 0;
4088
4089unlock:
4090 hci_dev_unlock(hdev);
4091 return err;
4092}
4093
4094static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4095 u16 len)
4096{
4097 struct mgmt_cp_confirm_name *cp = data;
4098 struct inquiry_entry *e;
4099 int err;
4100
4101 BT_DBG("%s", hdev->name);
4102
4103 hci_dev_lock(hdev);
4104
4105 if (!hci_discovery_active(hdev)) {
4106 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4107 MGMT_STATUS_FAILED, &cp->addr,
4108 sizeof(cp->addr));
4109 goto failed;
4110 }
4111
4112 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4113 if (!e) {
4114 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4115 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4116 sizeof(cp->addr));
4117 goto failed;
4118 }
4119
4120 if (cp->name_known) {
4121 e->name_state = NAME_KNOWN;
4122 list_del(&e->list);
4123 } else {
4124 e->name_state = NAME_NEEDED;
4125 hci_inquiry_cache_update_resolve(hdev, e);
4126 }
4127
4128 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4129 &cp->addr, sizeof(cp->addr));
4130
4131failed:
4132 hci_dev_unlock(hdev);
4133 return err;
4134}
4135
4136static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4137 u16 len)
4138{
4139 struct mgmt_cp_block_device *cp = data;
4140 u8 status;
4141 int err;
4142
4143 BT_DBG("%s", hdev->name);
4144
4145 if (!bdaddr_type_is_valid(cp->addr.type))
4146 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4147 MGMT_STATUS_INVALID_PARAMS,
4148 &cp->addr, sizeof(cp->addr));
4149
4150 hci_dev_lock(hdev);
4151
4152 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4153 cp->addr.type);
4154 if (err < 0) {
4155 status = MGMT_STATUS_FAILED;
4156 goto done;
4157 }
4158
4159 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4160 sk);
4161 status = MGMT_STATUS_SUCCESS;
4162
4163done:
4164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4165 &cp->addr, sizeof(cp->addr));
4166
4167 hci_dev_unlock(hdev);
4168
4169 return err;
4170}
4171
4172static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4173 u16 len)
4174{
4175 struct mgmt_cp_unblock_device *cp = data;
4176 u8 status;
4177 int err;
4178
4179 BT_DBG("%s", hdev->name);
4180
4181 if (!bdaddr_type_is_valid(cp->addr.type))
4182 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4183 MGMT_STATUS_INVALID_PARAMS,
4184 &cp->addr, sizeof(cp->addr));
4185
4186 hci_dev_lock(hdev);
4187
4188 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4189 cp->addr.type);
4190 if (err < 0) {
4191 status = MGMT_STATUS_INVALID_PARAMS;
4192 goto done;
4193 }
4194
4195 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4196 sk);
4197 status = MGMT_STATUS_SUCCESS;
4198
4199done:
4200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4201 &cp->addr, sizeof(cp->addr));
4202
4203 hci_dev_unlock(hdev);
4204
4205 return err;
4206}
4207
4208static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4209 u16 len)
4210{
4211 struct mgmt_cp_set_device_id *cp = data;
4212 struct hci_request req;
4213 int err;
4214 __u16 source;
4215
4216 BT_DBG("%s", hdev->name);
4217
4218 source = __le16_to_cpu(cp->source);
4219
4220 if (source > 0x0002)
4221 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4222 MGMT_STATUS_INVALID_PARAMS);
4223
4224 hci_dev_lock(hdev);
4225
4226 hdev->devid_source = source;
4227 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4228 hdev->devid_product = __le16_to_cpu(cp->product);
4229 hdev->devid_version = __le16_to_cpu(cp->version);
4230
4231 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4232 NULL, 0);
4233
4234 hci_req_init(&req, hdev);
4235 __hci_req_update_eir(&req);
4236 hci_req_run(&req, NULL);
4237
4238 hci_dev_unlock(hdev);
4239
4240 return err;
4241}
4242
/* Completion callback for re-enabling instance advertising. There is no
 * mgmt command waiting on this, so the result is only logged.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4248
/* Completion callback for the Set Advertising HCI request. Synchronizes
 * the HCI_ADVERTISING flag with the controller state, answers all pending
 * MGMT_OP_SET_ADVERTISING commands and, when the global setting was just
 * turned off, re-enables any previously configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state in the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Prefer the current instance; otherwise fall back to the first
	 * configured one.
	 */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4311
/* Handler for MGMT_OP_SET_ADVERTISING. val 0x00 disables advertising,
 * 0x01 enables it and 0x02 enables it in connectable mode. Depending on
 * the current state this either just toggles flags and responds directly,
 * or issues the necessary HCI commands and completes asynchronously via
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Collapse 0x01/0x02 into "enabled" for flag comparisons */
	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag really changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4419
/* Handler for MGMT_OP_SET_STATIC_ADDRESS. Configures the static random
 * address used for LE operation. Only permitted while the controller is
 * powered off; BDADDR_ANY clears the address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the identity address while powered is not allowed */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* BDADDR_NONE is not a valid static address */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4463
/* Handler for MGMT_OP_SET_SCAN_PARAMS. Stores new LE scan interval and
 * window values and restarts background scanning so they take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* 0x0004-0x4000 is the valid range for LE scan interval per the
	 * HCI Set Scan Parameters command definition.
	 */
	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit within the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4520
/* Completion callback for the Set Fast Connectable HCI request. On
 * success the HCI_FAST_CONNECTABLE flag is synchronized with the value
 * stored in the pending command; either way the waiting command is
 * answered and removed.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(status));
	} else {
		/* The requested mode was stored as the command parameter */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4554
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE. Toggles fast-connectable
 * (page scan parameter) mode. When powered off, only the flag is
 * toggled; otherwise the page scan settings are written to the
 * controller and the command completes asynchronously via
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	/* Requires BR/EDR and at least Bluetooth 1.2 (interlaced scan) */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the requested mode is already active */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: just toggle the flag; it takes effect on power on */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4619
/* Completion callback for the Set BR/EDR HCI request. On failure the
 * HCI_BREDR_ENABLED flag (set optimistically by set_bredr()) is rolled
 * back; on success the new settings are confirmed to user space.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4651
/* Handler for the MGMT_OP_SET_BREDR command: enable or disable BR/EDR
 * support on a dual-mode (BR/EDR + LE) controller.
 *
 * Returns 0 or a negative errno from the reply helpers. Rejected when
 * the controller is not dual-mode capable, when LE is not enabled, and
 * when trying to disable BR/EDR while powered on.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	/* Only 0x00 (disable) and 0x01 (enable) are defined values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off no HCI traffic is needed; just toggle the
	 * flags. Disabling BR/EDR also clears all BR/EDR-only settings.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	/* Only one Set BR/EDR operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	/* set_bredr_complete() rolls HCI_BREDR_ENABLED back on failure */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4763
/* Completion callback for the Write Secure Connections Support command
 * sent by set_secure_conn().
 *
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are updated according
 * to the originally requested mode (0x00 off, 0x01 enabled, 0x02
 * SC-only), the requester gets a settings response and New Settings is
 * broadcast. On failure only an error status is returned; the flags
 * were never touched on this path.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stashed in the pending command */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4808
/* Handler for the MGMT_OP_SET_SECURE_CONN command: configure Secure
 * Connections mode (0x00 off, 0x01 enabled, 0x02 SC-only).
 *
 * When the controller is powered off, not SC capable, or BR/EDR is
 * disabled, only the host-side flags are toggled. Otherwise the
 * controller is reconfigured via Write SC Support and the flags are
 * updated in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SC requires either controller support or LE being enabled */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, SC builds on SSP, so SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Host-flag-only path: no HCI traffic needed/possible */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		/* Broadcast only if the settings actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested mode already active: confirm without HCI traffic */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4896
/* Handler for the MGMT_OP_SET_DEBUG_KEYS command.
 *
 * Modes: 0x00 discard debug keys, 0x01 keep debug keys, 0x02 keep and
 * also actively use the SSP debug mode. Only HCI_KEEP_DEBUG_KEYS
 * changes are reported via New Settings; HCI_USE_DEBUG_KEYS changes
 * additionally require reconfiguring the controller when powered and
 * SSP is enabled.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value keeps debug keys around */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 enables active use of the debug key pair */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the new debug mode to the controller; the result is not
	 * waited for here (fire-and-forget command).
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4943
/* Handler for the MGMT_OP_SET_PRIVACY command: enable/disable LE
 * privacy (RPA usage) and install the local Identity Resolving Key.
 *
 * Modes: 0x00 privacy off, 0x01 privacy on, 0x02 limited privacy.
 * Only allowed while the controller is powered off, since changing the
 * identity material on a live controller is not supported here.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Install the new local IRK and force generation of a
		 * fresh RPA (mark current one as expired).
		 */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the IRK so it can not be used any more */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5000
5001static bool irk_is_valid(struct mgmt_irk_info *irk)
5002{
5003 switch (irk->addr.type) {
5004 case BDADDR_LE_PUBLIC:
5005 return true;
5006
5007 case BDADDR_LE_RANDOM:
5008 /* Two most significant bits shall be set */
5009 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5010 return false;
5011 return true;
5012 }
5013
5014 return false;
5015}
5016
/* Handler for the MGMT_OP_LOAD_IRKS command: replace the kernel's set
 * of remote Identity Resolving Keys with the list from user space.
 *
 * The whole list is validated before any existing IRK is touched, so
 * an invalid entry leaves the current state unmodified.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate everything up front; reject the whole load on any
	 * bad entry without modifying existing keys.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replacement: drop all current IRKs, then add the new set */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5079
5080static bool ltk_is_valid(struct mgmt_ltk_info *key)
5081{
5082 if (key->master != 0x00 && key->master != 0x01)
5083 return false;
5084
5085 switch (key->addr.type) {
5086 case BDADDR_LE_PUBLIC:
5087 return true;
5088
5089 case BDADDR_LE_RANDOM:
5090 /* Two most significant bits shall be set */
5091 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5092 return false;
5093 return true;
5094 }
5095
5096 return false;
5097}
5098
/* Handler for the MGMT_OP_LOAD_LONG_TERM_KEYS command: replace the
 * kernel's stored LE Long Term Keys with the list from user space.
 *
 * All entries are validated before the existing keys are cleared.
 * P-256 debug keys are intentionally never loaded (see the fallthrough
 * in the type switch below).
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate everything up front; reject the whole load on any
	 * bad entry without modifying existing keys.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replacement: drop all current LTKs, then add the new set */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through */
			/* Debug keys are deliberately skipped: the
			 * fallthrough into default hits "continue", so
			 * they are never persisted into the key list.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5186
/* Finish a pending Get Connection Information command.
 *
 * On success the RSSI/TX-power values cached on the connection are
 * returned; on failure the "invalid" sentinel values are used instead.
 * Drops the connection hold and reference that get_conn_info() took
 * when it stored the connection in cmd->user_data.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The address the caller asked about was stashed in cmd->param */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Release the hold + reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5213
/* Completion callback for the Read RSSI / Read TX Power request built
 * by get_conn_info(). Maps the result back to the connection via the
 * handle in the last-sent command and completes the matching pending
 * management command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command matches: nothing sensible to do */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is keyed by the connection it refers to */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5267
/* Handler for the MGMT_OP_GET_CONN_INFO command: report RSSI and TX
 * power for an active connection.
 *
 * Values cached on the hci_conn are returned directly while still
 * fresh; otherwise the controller is queried and the reply is deferred
 * to conn_info_refresh_complete()/conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes back the queried address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one query per connection may be in flight at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold + reference are released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5388
/* Finish a pending Get Clock Information command.
 *
 * Fills in the local clock from the hci_dev and, when a connection was
 * queried, the piconet clock and accuracy from the hci_conn stored in
 * cmd->user_data (which may be NULL for a local-only query). Releases
 * the hold + reference taken on the connection by get_clock_info().
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* Echo back the queried address stashed in cmd->param */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On failure reply with zeroed clock values */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5424
/* Completion callback for the Read Clock request built by
 * get_clock_info(). Uses the "which" field of the last-sent Read Clock
 * command to decide whether a piconet (per-connection) clock was
 * queried, then completes the matching pending management command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means a piconet clock read, tied to a connection */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command is keyed by the (possibly NULL) connection */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5456
/* Handler for the MGMT_OP_GET_CLOCK_INFO command: read the local
 * controller clock and, if a peer address is given, the piconet clock
 * of the matching BR/EDR connection. The reply is deferred to
 * get_clock_info_complete()/clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes back the queried address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-wildcard address selects the piconet clock of that
	 * connection; BDADDR_ANY requests only the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (which = 0x00 via memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold + reference are released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5532
5533static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5534{
5535 struct hci_conn *conn;
5536
5537 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5538 if (!conn)
5539 return false;
5540
5541 if (conn->dst_type != type)
5542 return false;
5543
5544 if (conn->state != BT_CONNECTED)
5545 return false;
5546
5547 return true;
5548}
5549
/* This function requires the caller holds hdev->lock */
/* Create (if needed) the connection parameters for the given address
 * and set its auto-connect policy, moving the params entry onto the
 * matching pending-connections or passive-reports action list.
 *
 * Returns 0 on success (including no-op when the policy is unchanged),
 * -EIO if the params entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-filing it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5594
5595static void device_added(struct sock *sk, struct hci_dev *hdev,
5596 bdaddr_t *bdaddr, u8 type, u8 action)
5597{
5598 struct mgmt_ev_device_added ev;
5599
5600 bacpy(&ev.addr.bdaddr, bdaddr);
5601 ev.addr.type = type;
5602 ev.action = action;
5603
5604 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5605}
5606
/* Handler for the MGMT_OP_ADD_DEVICE command.
 *
 * For BR/EDR addresses the device is added to the whitelist (only the
 * incoming-connection action 0x01 is supported). For LE identity
 * addresses, connection parameters are created with the auto-connect
 * policy derived from the action (0x00 report, 0x01 direct,
 * 0x02 always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Page scan may need to be enabled for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	/* New pending entry may require passive scanning to start */
	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5694
5695static void device_removed(struct sock *sk, struct hci_dev *hdev,
5696 bdaddr_t *bdaddr, u8 type)
5697{
5698 struct mgmt_ev_device_removed ev;
5699
5700 bacpy(&ev.addr.bdaddr, bdaddr);
5701 ev.addr.type = type;
5702
5703 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5704}
5705
/* Handler for the MGMT_OP_REMOVE_DEVICE command.
 *
 * With a concrete address: removes a BR/EDR whitelist entry or the LE
 * connection parameters for that identity address. With BDADDR_ANY
 * (and address type 0): wipes the whole whitelist and all removable LE
 * connection parameters; entries with an in-progress explicit connect
 * are kept but downgraded to HCI_AUTO_CONN_EXPLICIT.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may no longer be needed */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED/EXPLICIT entries were not created through Add
		 * Device, so they cannot be removed through Remove Device.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		/* Passive scanning may no longer be needed */
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal is only defined for address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect,
			 * but demote them so they are not auto-connected.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5834
/* MGMT_OP_LOAD_CONN_PARAM handler: replace the stored LE connection
 * parameters with the list supplied by userspace. Individual invalid
 * entries are skipped (with an error log) rather than failing the
 * whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest element count whose total message size still fits in
	 * the u16 length field.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command carries a flexible array of parameters; the total
	 * length must match the advertised element count exactly.
	 */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop parameters of disabled connections before loading the new
	 * set.
	 */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types are meaningful for connection
		 * parameters.
		 */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Range/consistency check per the Core Specification. */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Returns the existing entry or allocates a new one. */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5919
5920static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5921 void *data, u16 len)
5922{
5923 struct mgmt_cp_set_external_config *cp = data;
5924 bool changed;
5925 int err;
5926
5927 BT_DBG("%s", hdev->name);
5928
5929 if (hdev_is_powered(hdev))
5930 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5931 MGMT_STATUS_REJECTED);
5932
5933 if (cp->config != 0x00 && cp->config != 0x01)
5934 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5935 MGMT_STATUS_INVALID_PARAMS);
5936
5937 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5938 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5939 MGMT_STATUS_NOT_SUPPORTED);
5940
5941 hci_dev_lock(hdev);
5942
5943 if (cp->config)
5944 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
5945 else
5946 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
5947
5948 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5949 if (err < 0)
5950 goto unlock;
5951
5952 if (!changed)
5953 goto unlock;
5954
5955 err = new_options(hdev, sk);
5956
5957 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
5958 mgmt_index_removed(hdev);
5959
5960 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
5961 hci_dev_set_flag(hdev, HCI_CONFIG);
5962 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5963
5964 queue_work(hdev->req_workqueue, &hdev->power_on);
5965 } else {
5966 set_bit(HCI_RAW, &hdev->flags);
5967 mgmt_index_added(hdev);
5968 }
5969 }
5970
5971unlock:
5972 hci_dev_unlock(hdev);
5973 return err;
5974}
5975
5976static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5977 void *data, u16 len)
5978{
5979 struct mgmt_cp_set_public_address *cp = data;
5980 bool changed;
5981 int err;
5982
5983 BT_DBG("%s", hdev->name);
5984
5985 if (hdev_is_powered(hdev))
5986 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5987 MGMT_STATUS_REJECTED);
5988
5989 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5990 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5991 MGMT_STATUS_INVALID_PARAMS);
5992
5993 if (!hdev->set_bdaddr)
5994 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5995 MGMT_STATUS_NOT_SUPPORTED);
5996
5997 hci_dev_lock(hdev);
5998
5999 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6000 bacpy(&hdev->public_addr, &cp->bdaddr);
6001
6002 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6003 if (err < 0)
6004 goto unlock;
6005
6006 if (!changed)
6007 goto unlock;
6008
6009 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6010 err = new_options(hdev, sk);
6011
6012 if (is_configured(hdev)) {
6013 mgmt_index_removed(hdev);
6014
6015 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6016
6017 hci_dev_set_flag(hdev, HCI_CONFIG);
6018 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6019
6020 queue_work(hdev->req_workqueue, &hdev->power_on);
6021 }
6022
6023unlock:
6024 hci_dev_unlock(hdev);
6025 return err;
6026}
6027
/* Completion handler for the HCI OOB data request issued by
 * read_local_ssp_oob_req(). Translates the controller reply into a
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA response and, on success, also emits
 * a Local OOB Data Updated event to other subscribed sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* HCI-level failure: reply with the mapped status and an
		 * empty EIR block.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only the P-192 hash/randomizer pair. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-device TLV (5) + two 16-byte value
			 * TLVs (18 each, incl. length and type bytes).
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: both P-192 and P-256 pairs. */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* Secure Connections Only mode:
				 * suppress the legacy P-192 values.
				 */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On any failure eir_len is 0 and the h/r pointers are unused. */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify other sockets subscribed to OOB data events (the
	 * requesting socket is excluded).
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6138
6139static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6140 struct mgmt_cp_read_local_oob_ext_data *cp)
6141{
6142 struct mgmt_pending_cmd *cmd;
6143 struct hci_request req;
6144 int err;
6145
6146 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6147 cp, sizeof(*cp));
6148 if (!cmd)
6149 return -ENOMEM;
6150
6151 hci_req_init(&req, hdev);
6152
6153 if (bredr_sc_enabled(hdev))
6154 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6155 else
6156 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6157
6158 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6159 if (err < 0) {
6160 mgmt_pending_remove(cmd);
6161 return err;
6162 }
6163
6164 return 0;
6165}
6166
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: build the EIR blob with the
 * local out-of-band pairing data for either BR/EDR or LE, depending on
 * the requested address type bitmask. BR/EDR with SSP enabled defers to
 * an asynchronous HCI request; all other paths respond synchronously.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: determine the status and the worst-case EIR size
	 * needed for the reply allocation.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the actual EIR data. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* The OOB values must come from the controller;
			 * reply asynchronously once the HCI request
			 * completes.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: the static random
		 * address (type byte 0x01) when forced, when no public
		 * address exists, or in LE-only mode with a static
		 * address set; otherwise the public address (0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 = peripheral preferred (advertising),
		 * 0x01 = central preferred.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Let other subscribed sockets know the OOB data changed. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6322
6323static u32 get_supported_adv_flags(struct hci_dev *hdev)
6324{
6325 u32 flags = 0;
6326
6327 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6328 flags |= MGMT_ADV_FLAG_DISCOV;
6329 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6330 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6331 flags |= MGMT_ADV_FLAG_APPEARANCE;
6332 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6333
6334 /* In extended adv TX_POWER returned from Set Adv Param
6335 * will be always valid.
6336 */
6337 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6338 ext_adv_capable(hdev))
6339 flags |= MGMT_ADV_FLAG_TX_POWER;
6340
6341 if (ext_adv_capable(hdev)) {
6342 flags |= MGMT_ADV_FLAG_SEC_1M;
6343
6344 if (hdev->le_features[1] & HCI_LE_PHY_2M)
6345 flags |= MGMT_ADV_FLAG_SEC_2M;
6346
6347 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6348 flags |= MGMT_ADV_FLAG_SEC_CODED;
6349 }
6350
6351 return flags;
6352}
6353
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising
 * flags, size limits, and the list of currently registered advertising
 * instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* The reply ends with one byte per registered instance. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Copy out the identifier of every registered instance. */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6402
6403static u8 calculate_name_len(struct hci_dev *hdev)
6404{
6405 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6406
6407 return append_local_name(hdev, buf, 0);
6408}
6409
6410static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6411 bool is_adv_data)
6412{
6413 u8 max_len = HCI_MAX_AD_LENGTH;
6414
6415 if (is_adv_data) {
6416 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6417 MGMT_ADV_FLAG_LIMITED_DISCOV |
6418 MGMT_ADV_FLAG_MANAGED_FLAGS))
6419 max_len -= 3;
6420
6421 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6422 max_len -= 3;
6423 } else {
6424 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6425 max_len -= calculate_name_len(hdev);
6426
6427 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6428 max_len -= 4;
6429 }
6430
6431 return max_len;
6432}
6433
6434static bool flags_managed(u32 adv_flags)
6435{
6436 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6437 MGMT_ADV_FLAG_LIMITED_DISCOV |
6438 MGMT_ADV_FLAG_MANAGED_FLAGS);
6439}
6440
6441static bool tx_power_managed(u32 adv_flags)
6442{
6443 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6444}
6445
6446static bool name_managed(u32 adv_flags)
6447{
6448 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6449}
6450
6451static bool appearance_managed(u32 adv_flags)
6452{
6453 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6454}
6455
6456static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6457 u8 len, bool is_adv_data)
6458{
6459 int i, cur_len;
6460 u8 max_len;
6461
6462 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6463
6464 if (len > max_len)
6465 return false;
6466
6467 /* Make sure that the data is correctly formatted. */
6468 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6469 cur_len = data[i];
6470
6471 if (!cur_len)
6472 continue;
6473
6474 if (data[i + 1] == EIR_FLAGS &&
6475 (!is_adv_data || flags_managed(adv_flags)))
6476 return false;
6477
6478 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6479 return false;
6480
6481 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6482 return false;
6483
6484 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6485 return false;
6486
6487 if (data[i + 1] == EIR_APPEARANCE &&
6488 appearance_managed(adv_flags))
6489 return false;
6490
6491 /* If the current field length would exceed the total data
6492 * length, then it's invalid.
6493 */
6494 if (i + cur_len >= len)
6495 return false;
6496 }
6497
6498 return true;
6499}
6500
/* HCI request completion handler for MGMT_OP_ADD_ADVERTISING. On
 * failure, every still-pending instance is rolled back and removed; on
 * success the pending markers are cleared. Finally the pending mgmt
 * command (if any) is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			/* Instance was programmed successfully. */
			adv_instance->pending = false;
			continue;
		}

		/* Roll back an instance that failed to be programmed. */
		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6552
/* MGMT_OP_ADD_ADVERTISING handler: validate and register a new (or
 * replacement) advertising instance and, when the controller state
 * allows, schedule it to be advertised immediately.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Adv data and scan-response data trail the fixed header; the
	 * total message length must match exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more
	 * than one secondary-PHY bit is set, which is not allowed.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6695
6696static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6697 u16 opcode)
6698{
6699 struct mgmt_pending_cmd *cmd;
6700 struct mgmt_cp_remove_advertising *cp;
6701 struct mgmt_rp_remove_advertising rp;
6702
6703 BT_DBG("status %d", status);
6704
6705 hci_dev_lock(hdev);
6706
6707 /* A failure status here only means that we failed to disable
6708 * advertising. Otherwise, the advertising instance has been removed,
6709 * so report success.
6710 */
6711 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6712 if (!cmd)
6713 goto unlock;
6714
6715 cp = cmd->param;
6716 rp.instance = cp->instance;
6717
6718 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6719 &rp, sizeof(rp));
6720 mgmt_pending_remove(cmd);
6721
6722unlock:
6723 hci_dev_unlock(hdev);
6724}
6725
/* MGMT_OP_REMOVE_ADVERTISING handler: remove one advertising instance
 * (or all of them when cp->instance is 0) and disable advertising when
 * no instance remains.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance identifier must refer to an existing
	 * instance; zero means "all instances".
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another advertising/LE state change is in flight. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Removes the instance(s) and queues any needed HCI commands. */
	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6798
6799static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6800 void *data, u16 data_len)
6801{
6802 struct mgmt_cp_get_adv_size_info *cp = data;
6803 struct mgmt_rp_get_adv_size_info rp;
6804 u32 flags, supported_flags;
6805 int err;
6806
6807 BT_DBG("%s", hdev->name);
6808
6809 if (!lmp_le_capable(hdev))
6810 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6811 MGMT_STATUS_REJECTED);
6812
6813 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6814 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6815 MGMT_STATUS_INVALID_PARAMS);
6816
6817 flags = __le32_to_cpu(cp->flags);
6818
6819 /* The current implementation only supports a subset of the specified
6820 * flags.
6821 */
6822 supported_flags = get_supported_adv_flags(hdev);
6823 if (flags & ~supported_flags)
6824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6825 MGMT_STATUS_INVALID_PARAMS);
6826
6827 rp.instance = cp->instance;
6828 rp.flags = cp->flags;
6829 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6830 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6831
6832 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6833 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6834
6835 return err;
6836}
6837
/* Management command dispatch table. The entry at index N handles mgmt
 * opcode N, so the order must match the MGMT_OP_* numbering exactly.
 * The size field is the fixed parameter length; HCI_MGMT_VAR_LEN marks
 * commands that carry additional variable-length data. HCI_MGMT_NO_HDEV
 * commands address the management interface itself rather than a
 * controller, HCI_MGMT_UNTRUSTED commands are allowed for untrusted
 * sockets, and HCI_MGMT_UNCONFIGURED commands may target a controller
 * that is not yet fully configured.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
};
6933
/* Announce a newly registered controller on the management interface.
 *
 * Legacy listeners receive MGMT_EV_INDEX_ADDED (or the unconfigured
 * variant), while extended listeners receive MGMT_EV_EXT_INDEX_ADDED
 * carrying the controller type and bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		/* Unknown controller types are not announced at all. */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6965
/* Announce removal of a controller on the management interface.
 *
 * All still-pending management commands for this controller are
 * completed with MGMT_STATUS_INVALID_INDEX before the removal events
 * are sent (legacy and extended variants, mirroring mgmt_index_added).
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices were never announced, so nothing to remove. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 means "match all pending commands". */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7000
7001/* This function requires the caller holds hdev->lock */
7002static void restart_le_actions(struct hci_dev *hdev)
7003{
7004 struct hci_conn_params *p;
7005
7006 list_for_each_entry(p, &hdev->le_conn_params, list) {
7007 /* Needed for AUTO_OFF case where might not "really"
7008 * have been powered off.
7009 */
7010 list_del_init(&p->action);
7011
7012 switch (p->auto_connect) {
7013 case HCI_AUTO_CONN_DIRECT:
7014 case HCI_AUTO_CONN_ALWAYS:
7015 list_add(&p->action, &hdev->pend_le_conns);
7016 break;
7017 case HCI_AUTO_CONN_REPORT:
7018 list_add(&p->action, &hdev->pend_le_reports);
7019 break;
7020 default:
7021 break;
7022 }
7023 }
7024}
7025
7026void mgmt_power_on(struct hci_dev *hdev, int err)
7027{
7028 struct cmd_lookup match = { NULL, hdev };
7029
7030 BT_DBG("err %d", err);
7031
7032 hci_dev_lock(hdev);
7033
7034 if (!err) {
7035 restart_le_actions(hdev);
7036 hci_update_background_scan(hdev);
7037 }
7038
7039 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7040
7041 new_settings(hdev, match.sk);
7042
7043 if (match.sk)
7044 sock_put(match.sk);
7045
7046 hci_dev_unlock(hdev);
7047}
7048
/* Finalize powering off a controller: complete pending commands with
 * an appropriate status, clear the advertised class of device, and
 * broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* Complete pending Set Powered commands first so their sockets
	 * get a proper settings response rather than an error status.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7082
7083void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7084{
7085 struct mgmt_pending_cmd *cmd;
7086 u8 status;
7087
7088 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7089 if (!cmd)
7090 return;
7091
7092 if (err == -ERFKILL)
7093 status = MGMT_STATUS_RFKILLED;
7094 else
7095 status = MGMT_STATUS_FAILED;
7096
7097 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7098
7099 mgmt_pending_remove(cmd);
7100}
7101
7102void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7103 bool persistent)
7104{
7105 struct mgmt_ev_new_link_key ev;
7106
7107 memset(&ev, 0, sizeof(ev));
7108
7109 ev.store_hint = persistent;
7110 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7111 ev.key.addr.type = BDADDR_BREDR;
7112 ev.key.type = key->type;
7113 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7114 ev.key.pin_len = key->pin_len;
7115
7116 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7117}
7118
7119static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7120{
7121 switch (ltk->type) {
7122 case SMP_LTK:
7123 case SMP_LTK_SLAVE:
7124 if (ltk->authenticated)
7125 return MGMT_LTK_AUTHENTICATED;
7126 return MGMT_LTK_UNAUTHENTICATED;
7127 case SMP_LTK_P256:
7128 if (ltk->authenticated)
7129 return MGMT_LTK_P256_AUTH;
7130 return MGMT_LTK_P256_UNAUTH;
7131 case SMP_LTK_P256_DEBUG:
7132 return MGMT_LTK_P256_DEBUG;
7133 }
7134
7135 return MGMT_LTK_UNAUTHENTICATED;
7136}
7137
/* Emit a New Long Term Key event for a freshly distributed LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		/* Top two bits != 11 means non-static random address. */
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Legacy master keys are flagged so userspace can tell the
	 * key's role during re-encryption.
	 */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7180
7181void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7182{
7183 struct mgmt_ev_new_irk ev;
7184
7185 memset(&ev, 0, sizeof(ev));
7186
7187 ev.store_hint = persistent;
7188
7189 bacpy(&ev.rpa, &irk->rpa);
7190 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7191 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7192 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7193
7194 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7195}
7196
/* Emit a New CSRK event for a freshly distributed signature
 * resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		/* Top two bits != 11 means non-static random address. */
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7226
7227void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7228 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7229 u16 max_interval, u16 latency, u16 timeout)
7230{
7231 struct mgmt_ev_new_conn_param ev;
7232
7233 if (!hci_is_identity_address(bdaddr, bdaddr_type))
7234 return;
7235
7236 memset(&ev, 0, sizeof(ev));
7237 bacpy(&ev.addr.bdaddr, bdaddr);
7238 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7239 ev.store_hint = store_hint;
7240 ev.min_interval = cpu_to_le16(min_interval);
7241 ev.max_interval = cpu_to_le16(max_interval);
7242 ev.latency = cpu_to_le16(latency);
7243 ev.timeout = cpu_to_le16(timeout);
7244
7245 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7246}
7247
/* Emit a Device Connected event, embedding either the connection's LE
 * advertising data or, for BR/EDR, the remote name and class of device
 * as EIR-formatted fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR payload share one
	 * stack buffer. NOTE(review): no explicit bounds check here;
	 * presumably le_adv_data_len and name_len are capped well below
	 * 512 by the callers — confirm against hci_event.c.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append a class of device if it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
7284
7285static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7286{
7287 struct sock **sk = data;
7288
7289 cmd->cmd_complete(cmd, 0);
7290
7291 *sk = cmd->sk;
7292 sock_hold(*sk);
7293
7294 mgmt_pending_remove(cmd);
7295}
7296
7297static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7298{
7299 struct hci_dev *hdev = data;
7300 struct mgmt_cp_unpair_device *cp = cmd->param;
7301
7302 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7303
7304 cmd->cmd_complete(cmd, 0);
7305 mgmt_pending_remove(cmd);
7306}
7307
7308bool mgmt_powering_down(struct hci_dev *hdev)
7309{
7310 struct mgmt_pending_cmd *cmd;
7311 struct mgmt_mode *cp;
7312
7313 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7314 if (!cmd)
7315 return false;
7316
7317 cp = cmd->param;
7318 if (!cp->val)
7319 return true;
7320
7321 return false;
7322}
7323
/* Emit a Device Disconnected event and, while powering down, expedite
 * the power-off work once the last connection drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections previously announced via Device Connected
	 * get a matching Device Disconnected event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes any pending Disconnect command and stores its
	 * socket (reference held) so it is skipped in the broadcast.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7359
7360void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7361 u8 link_type, u8 addr_type, u8 status)
7362{
7363 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7364 struct mgmt_cp_disconnect *cp;
7365 struct mgmt_pending_cmd *cmd;
7366
7367 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7368 hdev);
7369
7370 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7371 if (!cmd)
7372 return;
7373
7374 cp = cmd->param;
7375
7376 if (bacmp(bdaddr, &cp->addr.bdaddr))
7377 return;
7378
7379 if (cp->addr.type != bdaddr_type)
7380 return;
7381
7382 cmd->cmd_complete(cmd, mgmt_status(status));
7383 mgmt_pending_remove(cmd);
7384}
7385
7386void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7387 u8 addr_type, u8 status)
7388{
7389 struct mgmt_ev_connect_failed ev;
7390
7391 /* The connection is still in hci_conn_hash so test for 1
7392 * instead of 0 to know if this is the last one.
7393 */
7394 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7395 cancel_delayed_work(&hdev->power_off);
7396 queue_work(hdev->req_workqueue, &hdev->power_off.work);
7397 }
7398
7399 bacpy(&ev.addr.bdaddr, bdaddr);
7400 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7401 ev.status = mgmt_status(status);
7402
7403 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7404}
7405
7406void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7407{
7408 struct mgmt_ev_pin_code_request ev;
7409
7410 bacpy(&ev.addr.bdaddr, bdaddr);
7411 ev.addr.type = BDADDR_BREDR;
7412 ev.secure = secure;
7413
7414 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7415}
7416
7417void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7418 u8 status)
7419{
7420 struct mgmt_pending_cmd *cmd;
7421
7422 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7423 if (!cmd)
7424 return;
7425
7426 cmd->cmd_complete(cmd, mgmt_status(status));
7427 mgmt_pending_remove(cmd);
7428}
7429
7430void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7431 u8 status)
7432{
7433 struct mgmt_pending_cmd *cmd;
7434
7435 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7436 if (!cmd)
7437 return;
7438
7439 cmd->cmd_complete(cmd, mgmt_status(status));
7440 mgmt_pending_remove(cmd);
7441}
7442
7443int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7444 u8 link_type, u8 addr_type, u32 value,
7445 u8 confirm_hint)
7446{
7447 struct mgmt_ev_user_confirm_request ev;
7448
7449 BT_DBG("%s", hdev->name);
7450
7451 bacpy(&ev.addr.bdaddr, bdaddr);
7452 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7453 ev.confirm_hint = confirm_hint;
7454 ev.value = cpu_to_le32(value);
7455
7456 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7457 NULL);
7458}
7459
7460int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7461 u8 link_type, u8 addr_type)
7462{
7463 struct mgmt_ev_user_passkey_request ev;
7464
7465 BT_DBG("%s", hdev->name);
7466
7467 bacpy(&ev.addr.bdaddr, bdaddr);
7468 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7469
7470 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7471 NULL);
7472}
7473
7474static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7475 u8 link_type, u8 addr_type, u8 status,
7476 u8 opcode)
7477{
7478 struct mgmt_pending_cmd *cmd;
7479
7480 cmd = pending_find(opcode, hdev);
7481 if (!cmd)
7482 return -ENOENT;
7483
7484 cmd->cmd_complete(cmd, mgmt_status(status));
7485 mgmt_pending_remove(cmd);
7486
7487 return 0;
7488}
7489
7490int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7491 u8 link_type, u8 addr_type, u8 status)
7492{
7493 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7494 status, MGMT_OP_USER_CONFIRM_REPLY);
7495}
7496
7497int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7498 u8 link_type, u8 addr_type, u8 status)
7499{
7500 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7501 status,
7502 MGMT_OP_USER_CONFIRM_NEG_REPLY);
7503}
7504
7505int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7506 u8 link_type, u8 addr_type, u8 status)
7507{
7508 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7509 status, MGMT_OP_USER_PASSKEY_REPLY);
7510}
7511
7512int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7513 u8 link_type, u8 addr_type, u8 status)
7514{
7515 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7516 status,
7517 MGMT_OP_USER_PASSKEY_NEG_REPLY);
7518}
7519
7520int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7521 u8 link_type, u8 addr_type, u32 passkey,
7522 u8 entered)
7523{
7524 struct mgmt_ev_passkey_notify ev;
7525
7526 BT_DBG("%s", hdev->name);
7527
7528 bacpy(&ev.addr.bdaddr, bdaddr);
7529 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7530 ev.passkey = __cpu_to_le32(passkey);
7531 ev.entered = entered;
7532
7533 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7534}
7535
/* Emit an Authentication Failed event and, if a pairing command is
 * pending for this connection, complete it with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the pairing initiator's socket in the broadcast; it gets
	 * its answer through the command completion below instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7556
/* Handle completion of a Write Authentication Enable command issued
 * for Set Link Security: on error fail the pending commands, otherwise
 * sync the LINK_SECURITY flag with the controller state and broadcast
 * new settings when it actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the dev flag;
	 * 'changed' is true only when the flag value actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7583
7584static void clear_eir(struct hci_request *req)
7585{
7586 struct hci_dev *hdev = req->hdev;
7587 struct hci_cp_write_eir cp;
7588
7589 if (!lmp_ext_inq_capable(hdev))
7590 return;
7591
7592 memset(hdev->eir, 0, sizeof(hdev->eir));
7593
7594 memset(&cp, 0, sizeof(cp));
7595
7596 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7597}
7598
/* Handle completion of a Write Simple Pairing Mode command: reconcile
 * the SSP_ENABLED/HS_ENABLED flags with the result, answer pending
 * Set SSP commands, and refresh or clear the EIR accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* An enable attempt failed: roll back the optimistic
		 * flag set and tell listeners about the rollback.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed; 'changed' is
		 * true if either flag was actually cleared.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		/* Keep the controller's debug-key mode in sync while
		 * SSP is on, then regenerate the EIR data.
		 */
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7651
7652static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7653{
7654 struct cmd_lookup *match = data;
7655
7656 if (match->sk == NULL) {
7657 match->sk = cmd->sk;
7658 sock_hold(match->sk);
7659 }
7660}
7661
/* Handle completion of a class-of-device update: locate the socket of
 * any pending command that triggered it and, on success, announce the
 * new class (skipping no one; the lookup socket is only used for the
 * reference bookkeeping here).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the update. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7680
/* Handle completion of a local name change: update the cached name
 * when the change did not come from a Set Local Name command, and
 * announce the new name unless it is part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name changed outside mgmt (e.g. via raw HCI); keep the
		 * cached copy in sync.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket (if any) in the broadcast. */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7708
7709static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7710{
7711 int i;
7712
7713 for (i = 0; i < uuid_count; i++) {
7714 if (!memcmp(uuid, uuids[i], 16))
7715 return true;
7716 }
7717
7718 return false;
7719}
7720
/* Walk the EIR/advertising data and return true if any UUID field
 * (16-, 32- or 128-bit) contains a UUID from the filter list. 16- and
 * 32-bit UUIDs are expanded to 128 bits via the Bluetooth base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		/* Each EIR field: [len][type][data...]; 'len' counts the
		 * type byte plus the data bytes.
		 */
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		/* Bail out on a truncated field rather than overrun. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Little-endian 16-bit UUIDs at offsets 2, 4, ... */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Little-endian 32-bit UUIDs at offsets 2, 6, ... */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + contents). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7775
/* Schedule a delayed restart of an ongoing LE scan so that controllers
 * with strict duplicate filtering report fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if the scan would naturally end before the
	 * restart delay elapses anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7790
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device; returns true if the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7835
/* Report a device found during discovery (or passive LE scanning) as a
 * Device Found event, after applying discovery filters and assembling
 * EIR data, class of device and scan response into a single payload.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the CoD's second byte. */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: the Flags AD field must carry LE_AD_LIMITED. */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR doesn't already
	 * carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7917
7918void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7919 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7920{
7921 struct mgmt_ev_device_found *ev;
7922 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7923 u16 eir_len;
7924
7925 ev = (struct mgmt_ev_device_found *) buf;
7926
7927 memset(buf, 0, sizeof(buf));
7928
7929 bacpy(&ev->addr.bdaddr, bdaddr);
7930 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7931 ev->rssi = rssi;
7932
7933 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7934 name_len);
7935
7936 ev->eir_len = cpu_to_le16(eir_len);
7937
7938 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7939}
7940
7941void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7942{
7943 struct mgmt_ev_discovering ev;
7944
7945 BT_DBG("%s discovering %u", hdev->name, discovering);
7946
7947 memset(&ev, 0, sizeof(ev));
7948 ev.type = hdev->discovery.type;
7949 ev.discovering = discovering;
7950
7951 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7952}
7953
/* Registration descriptor for the management control channel: binds
 * the command handler table above to HCI_CHANNEL_CONTROL sockets.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
7960
/* Register the management channel; called from bluetooth core init.
 * Returns 0 on success or a negative errno.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7965
/* Unregister the management channel; called on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}