 1/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
24#include <linux/sched/signal.h>
25
26#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
28#include <net/bluetooth/mgmt.h>
29
30#include "smp.h"
31#include "hci_request.h"
32
33#define HCI_REQ_DONE 0
34#define HCI_REQ_PEND 1
35#define HCI_REQ_CANCELED 2
36
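/* A struct hci_request collects HCI commands that are submitted to the
 * controller as one batch: hci_req_init() prepares it, hci_req_add()
 * and hci_req_add_ev() queue commands, and hci_req_run() or the
 * synchronous helpers below send them off.
 */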
37void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38{
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42}
43
44void hci_req_purge(struct hci_request *req)
45{
46 skb_queue_purge(&req->cmd_q);
47}
48
49bool hci_req_status_pend(struct hci_dev *hdev)
50{
51 return hdev->req_status == HCI_REQ_PEND;
52}
53
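/* Move all commands queued on the request onto hdev->cmd_q and kick
 * cmd_work. The completion callback (plain or skb variant) is attached
 * to the last command of the request.
 */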
54static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56{
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90}
91
92int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93{
94 return req_run(req, complete, NULL);
95}
96
97int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98{
99 return req_run(req, NULL, complete);
100}
101
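/* Completion callback used by the synchronous helpers below: record
 * the result (and response skb, if any) in hdev and wake up the
 * waiting thread.
 */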
102static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104{
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb) {
111 kfree_skb(hdev->req_skb);
112 hdev->req_skb = skb_get(skb);
113 }
114 wake_up_interruptible(&hdev->req_wait_q);
115 }
116}
117
118void hci_req_sync_cancel(struct hci_dev *hdev, int err)
119{
120 BT_DBG("%s err 0x%2.2x", hdev->name, err);
121
122 if (hdev->req_status == HCI_REQ_PEND) {
123 hdev->req_result = err;
124 hdev->req_status = HCI_REQ_CANCELED;
125 wake_up_interruptible(&hdev->req_wait_q);
126 }
127}
128
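/* Send a single HCI command and block until the matching Command
 * Complete event (or, when @event is non-zero, the given event)
 * arrives or @timeout expires. Returns the response skb on success or
 * an ERR_PTR() value on failure; the caller owns the returned skb and
 * must kfree_skb() it.
 */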
129struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
130 const void *param, u8 event, u32 timeout)
131{
132 struct hci_request req;
133 struct sk_buff *skb;
134 int err = 0;
135
136 BT_DBG("%s", hdev->name);
137
138 hci_req_init(&req, hdev);
139
140 hci_req_add_ev(&req, opcode, plen, param, event);
141
142 hdev->req_status = HCI_REQ_PEND;
143
144 err = hci_req_run_skb(&req, hci_req_sync_complete);
145 if (err < 0)
146 return ERR_PTR(err);
147
148 err = wait_event_interruptible_timeout(hdev->req_wait_q,
149 hdev->req_status != HCI_REQ_PEND, timeout);
150
151 if (err == -ERESTARTSYS)
152 return ERR_PTR(-EINTR);
153
154 switch (hdev->req_status) {
155 case HCI_REQ_DONE:
156 err = -bt_to_errno(hdev->req_result);
157 break;
158
159 case HCI_REQ_CANCELED:
160 err = -hdev->req_result;
161 break;
162
163 default:
164 err = -ETIMEDOUT;
165 break;
166 }
167
168 hdev->req_status = hdev->req_result = 0;
169 skb = hdev->req_skb;
170 hdev->req_skb = NULL;
171
172 BT_DBG("%s end: err %d", hdev->name, err);
173
174 if (err < 0) {
175 kfree_skb(skb);
176 return ERR_PTR(err);
177 }
178
179 if (!skb)
180 return ERR_PTR(-ENODATA);
181
182 return skb;
183}
184EXPORT_SYMBOL(__hci_cmd_sync_ev);
185
186struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
187 const void *param, u32 timeout)
188{
189 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
190}
191EXPORT_SYMBOL(__hci_cmd_sync);
192
193/* Execute request and wait for completion. */
194int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
195 unsigned long opt),
196 unsigned long opt, u32 timeout, u8 *hci_status)
197{
198 struct hci_request req;
199 int err = 0;
200
201 BT_DBG("%s start", hdev->name);
202
203 hci_req_init(&req, hdev);
204
205 hdev->req_status = HCI_REQ_PEND;
206
207 err = func(&req, opt);
208 if (err) {
209 if (hci_status)
210 *hci_status = HCI_ERROR_UNSPECIFIED;
211 return err;
212 }
213
214 err = hci_req_run_skb(&req, hci_req_sync_complete);
215 if (err < 0) {
216 hdev->req_status = 0;
217
218 /* ENODATA means the HCI request command queue is empty.
219 * This can happen when a request with conditionals doesn't
220 * trigger any commands to be sent. This is normal behavior
221 * and should not trigger an error return.
222 */
223 if (err == -ENODATA) {
224 if (hci_status)
225 *hci_status = 0;
226 return 0;
227 }
228
229 if (hci_status)
230 *hci_status = HCI_ERROR_UNSPECIFIED;
231
232 return err;
233 }
234
235 err = wait_event_interruptible_timeout(hdev->req_wait_q,
236 hdev->req_status != HCI_REQ_PEND, timeout);
237
238 if (err == -ERESTARTSYS)
239 return -EINTR;
240
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
243 err = -bt_to_errno(hdev->req_result);
244 if (hci_status)
245 *hci_status = hdev->req_result;
246 break;
247
248 case HCI_REQ_CANCELED:
249 err = -hdev->req_result;
250 if (hci_status)
251 *hci_status = HCI_ERROR_UNSPECIFIED;
252 break;
253
254 default:
255 err = -ETIMEDOUT;
256 if (hci_status)
257 *hci_status = HCI_ERROR_UNSPECIFIED;
258 break;
259 }
260
261 kfree_skb(hdev->req_skb);
262 hdev->req_skb = NULL;
263 hdev->req_status = hdev->req_result = 0;
264
265 BT_DBG("%s end: err %d", hdev->name, err);
266
267 return err;
268}
269
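/* Build and run an HCI request using the @req builder callback,
 * serialized against other synchronous requests, and wait for it to
 * complete. Returns -ENETDOWN if the controller is not powered up.
 *
 * Minimal usage sketch (the builder name is made up for illustration):
 *
 *	static int example_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, example_req, 0, HCI_CMD_TIMEOUT, NULL);
 */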
270int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
271 unsigned long opt),
272 unsigned long opt, u32 timeout, u8 *hci_status)
273{
274 int ret;
275
276 /* Serialize all requests */
277 hci_req_sync_lock(hdev);
 278	/* Check the state after obtaining the lock to protect the HCI_UP
 279	 * flag against any races from hci_dev_do_close when the controller
 280	 * gets removed.
 281	 */
282 if (test_bit(HCI_UP, &hdev->flags))
283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
284 else
285 ret = -ENETDOWN;
286 hci_req_sync_unlock(hdev);
287
288 return ret;
289}
290
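/* Allocate an skb carrying a single HCI command: the command header
 * followed by @plen bytes of parameters, tagged with the packet type
 * and opcode so it can be queued on a request or on the command queue.
 */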
291struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
292 const void *param)
293{
294 int len = HCI_COMMAND_HDR_SIZE + plen;
295 struct hci_command_hdr *hdr;
296 struct sk_buff *skb;
297
298 skb = bt_skb_alloc(len, GFP_ATOMIC);
299 if (!skb)
300 return NULL;
301
302 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
303 hdr->opcode = cpu_to_le16(opcode);
304 hdr->plen = plen;
305
306 if (plen)
307 skb_put_data(skb, param, plen);
308
309 BT_DBG("skb len %d", skb->len);
310
311 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
312 hci_skb_opcode(skb) = opcode;
313
314 return skb;
315}
316
317/* Queue a command to an asynchronous HCI request */
318void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
319 const void *param, u8 event)
320{
321 struct hci_dev *hdev = req->hdev;
322 struct sk_buff *skb;
323
324 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
325
326 /* If an error occurred during request building, there is no point in
327 * queueing the HCI command. We can simply return.
328 */
329 if (req->err)
330 return;
331
332 skb = hci_prepare_cmd(hdev, opcode, plen, param);
333 if (!skb) {
334 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
335 opcode);
336 req->err = -ENOMEM;
337 return;
338 }
339
340 if (skb_queue_empty(&req->cmd_q))
341 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
342
343 bt_cb(skb)->hci.req_event = event;
344
345 skb_queue_tail(&req->cmd_q, skb);
346}
347
348void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
349 const void *param)
350{
351 hci_req_add_ev(req, opcode, plen, param, 0);
352}
353
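/* Toggle "fast connectable" page scanning: interlaced scanning with a
 * 160 ms interval when enabled, standard scanning with the default
 * 1.28 s interval otherwise. Commands are only queued when the values
 * actually change.
 */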
354void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
355{
356 struct hci_dev *hdev = req->hdev;
357 struct hci_cp_write_page_scan_activity acp;
358 u8 type;
359
360 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
361 return;
362
363 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
364 return;
365
366 if (enable) {
367 type = PAGE_SCAN_TYPE_INTERLACED;
368
369 /* 160 msec page scan interval */
370 acp.interval = cpu_to_le16(0x0100);
371 } else {
372 type = PAGE_SCAN_TYPE_STANDARD; /* default */
373
374 /* default 1.28 sec page scan */
375 acp.interval = cpu_to_le16(0x0800);
376 }
377
378 acp.window = cpu_to_le16(0x0012);
379
380 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
381 __cpu_to_le16(hdev->page_scan_window) != acp.window)
382 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
383 sizeof(acp), &acp);
384
385 if (hdev->page_scan_type != type)
386 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
387}
388
389/* This function controls the background scanning based on hdev->pend_le_conns
 390	 * list. If there are pending LE connections we start the background scanning,
391 * otherwise we stop it.
392 *
393 * This function requires the caller holds hdev->lock.
394 */
395static void __hci_update_background_scan(struct hci_request *req)
396{
397 struct hci_dev *hdev = req->hdev;
398
399 if (!test_bit(HCI_UP, &hdev->flags) ||
400 test_bit(HCI_INIT, &hdev->flags) ||
401 hci_dev_test_flag(hdev, HCI_SETUP) ||
402 hci_dev_test_flag(hdev, HCI_CONFIG) ||
403 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
404 hci_dev_test_flag(hdev, HCI_UNREGISTER))
405 return;
406
407 /* No point in doing scanning if LE support hasn't been enabled */
408 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
409 return;
410
411 /* If discovery is active don't interfere with it */
412 if (hdev->discovery.state != DISCOVERY_STOPPED)
413 return;
414
415 /* Reset RSSI and UUID filters when starting background scanning
416 * since these filters are meant for service discovery only.
417 *
418 * The Start Discovery and Start Service Discovery operations
 419	 * ensure that proper values are set for the RSSI threshold and UUID
 420	 * filter list, so it is safe to just reset them here.
421 */
422 hci_discovery_filter_clear(hdev);
423
424 if (list_empty(&hdev->pend_le_conns) &&
425 list_empty(&hdev->pend_le_reports)) {
 426	/* If there are no pending LE connections or devices
427 * to be scanned for, we should stop the background
428 * scanning.
429 */
430
431 /* If controller is not scanning we are done. */
432 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
433 return;
434
435 hci_req_add_le_scan_disable(req);
436
437 BT_DBG("%s stopping background scanning", hdev->name);
438 } else {
439 /* If there is at least one pending LE connection, we should
440 * keep the background scan running.
441 */
442
443 /* If controller is connecting, we should not start scanning
444 * since some controllers are not able to scan and connect at
445 * the same time.
446 */
447 if (hci_lookup_le_connect(hdev))
448 return;
449
450 /* If controller is currently scanning, we stop it to ensure we
451 * don't miss any advertising (due to duplicates filter).
452 */
453 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
454 hci_req_add_le_scan_disable(req);
455
456 hci_req_add_le_passive_scan(req);
457
458 BT_DBG("%s starting background scanning", hdev->name);
459 }
460}
461
462void __hci_req_update_name(struct hci_request *req)
463{
464 struct hci_dev *hdev = req->hdev;
465 struct hci_cp_write_local_name cp;
466
467 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
468
469 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
470}
471
472#define PNP_INFO_SVCLASS_ID 0x1200
473
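/* The three helpers below build the EIR UUID lists (16, 32 and 128 bit).
 * Each writes one EIR field into @data, downgrades the field type from
 * *_ALL to *_SOME when it runs out of space, and returns a pointer just
 * past the last byte written.
 */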
474static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
475{
476 u8 *ptr = data, *uuids_start = NULL;
477 struct bt_uuid *uuid;
478
479 if (len < 4)
480 return ptr;
481
482 list_for_each_entry(uuid, &hdev->uuids, list) {
483 u16 uuid16;
484
485 if (uuid->size != 16)
486 continue;
487
488 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
489 if (uuid16 < 0x1100)
490 continue;
491
492 if (uuid16 == PNP_INFO_SVCLASS_ID)
493 continue;
494
495 if (!uuids_start) {
496 uuids_start = ptr;
497 uuids_start[0] = 1;
498 uuids_start[1] = EIR_UUID16_ALL;
499 ptr += 2;
500 }
501
502 /* Stop if not enough space to put next UUID */
503 if ((ptr - data) + sizeof(u16) > len) {
504 uuids_start[1] = EIR_UUID16_SOME;
505 break;
506 }
507
508 *ptr++ = (uuid16 & 0x00ff);
509 *ptr++ = (uuid16 & 0xff00) >> 8;
510 uuids_start[0] += sizeof(uuid16);
511 }
512
513 return ptr;
514}
515
516static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
517{
518 u8 *ptr = data, *uuids_start = NULL;
519 struct bt_uuid *uuid;
520
521 if (len < 6)
522 return ptr;
523
524 list_for_each_entry(uuid, &hdev->uuids, list) {
525 if (uuid->size != 32)
526 continue;
527
528 if (!uuids_start) {
529 uuids_start = ptr;
530 uuids_start[0] = 1;
531 uuids_start[1] = EIR_UUID32_ALL;
532 ptr += 2;
533 }
534
535 /* Stop if not enough space to put next UUID */
536 if ((ptr - data) + sizeof(u32) > len) {
537 uuids_start[1] = EIR_UUID32_SOME;
538 break;
539 }
540
541 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
542 ptr += sizeof(u32);
543 uuids_start[0] += sizeof(u32);
544 }
545
546 return ptr;
547}
548
549static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
550{
551 u8 *ptr = data, *uuids_start = NULL;
552 struct bt_uuid *uuid;
553
554 if (len < 18)
555 return ptr;
556
557 list_for_each_entry(uuid, &hdev->uuids, list) {
558 if (uuid->size != 128)
559 continue;
560
561 if (!uuids_start) {
562 uuids_start = ptr;
563 uuids_start[0] = 1;
564 uuids_start[1] = EIR_UUID128_ALL;
565 ptr += 2;
566 }
567
568 /* Stop if not enough space to put next UUID */
569 if ((ptr - data) + 16 > len) {
570 uuids_start[1] = EIR_UUID128_SOME;
571 break;
572 }
573
574 memcpy(ptr, uuid->uuid, 16);
575 ptr += 16;
576 uuids_start[0] += 16;
577 }
578
579 return ptr;
580}
581
582static void create_eir(struct hci_dev *hdev, u8 *data)
583{
584 u8 *ptr = data;
585 size_t name_len;
586
587 name_len = strlen(hdev->dev_name);
588
589 if (name_len > 0) {
590 /* EIR Data type */
591 if (name_len > 48) {
592 name_len = 48;
593 ptr[1] = EIR_NAME_SHORT;
594 } else
595 ptr[1] = EIR_NAME_COMPLETE;
596
597 /* EIR Data length */
598 ptr[0] = name_len + 1;
599
600 memcpy(ptr + 2, hdev->dev_name, name_len);
601
602 ptr += (name_len + 2);
603 }
604
605 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
606 ptr[0] = 2;
607 ptr[1] = EIR_TX_POWER;
608 ptr[2] = (u8) hdev->inq_tx_power;
609
610 ptr += 3;
611 }
612
613 if (hdev->devid_source > 0) {
614 ptr[0] = 9;
615 ptr[1] = EIR_DEVICE_ID;
616
617 put_unaligned_le16(hdev->devid_source, ptr + 2);
618 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
619 put_unaligned_le16(hdev->devid_product, ptr + 6);
620 put_unaligned_le16(hdev->devid_version, ptr + 8);
621
622 ptr += 10;
623 }
624
625 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
627 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
628}
629
630void __hci_req_update_eir(struct hci_request *req)
631{
632 struct hci_dev *hdev = req->hdev;
633 struct hci_cp_write_eir cp;
634
635 if (!hdev_is_powered(hdev))
636 return;
637
638 if (!lmp_ext_inq_capable(hdev))
639 return;
640
641 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
642 return;
643
644 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
645 return;
646
647 memset(&cp, 0, sizeof(cp));
648
649 create_eir(hdev, cp.data);
650
651 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
652 return;
653
654 memcpy(hdev->eir, cp.data, sizeof(cp.data));
655
656 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
657}
658
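/* Queue the command to stop LE scanning, using the extended or the
 * legacy variant depending on what the controller supports.
 */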
659void hci_req_add_le_scan_disable(struct hci_request *req)
660{
661 struct hci_dev *hdev = req->hdev;
662
663 if (use_ext_scan(hdev)) {
664 struct hci_cp_le_set_ext_scan_enable cp;
665
666 memset(&cp, 0, sizeof(cp));
667 cp.enable = LE_SCAN_DISABLE;
668 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
669 &cp);
670 } else {
671 struct hci_cp_le_set_scan_enable cp;
672
673 memset(&cp, 0, sizeof(cp));
674 cp.enable = LE_SCAN_DISABLE;
675 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
676 }
677}
678
679static void add_to_white_list(struct hci_request *req,
680 struct hci_conn_params *params)
681{
682 struct hci_cp_le_add_to_white_list cp;
683
684 cp.bdaddr_type = params->addr_type;
685 bacpy(&cp.bdaddr, &params->addr);
686
687 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
688}
689
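/* Synchronize the controller white list with pend_le_conns and
 * pend_le_reports. Returns the scan filter policy to use: 0x01 when
 * the white list can be used, 0x00 (accept all advertising) when it
 * cannot, for example because it is too small or because RPAs are in
 * use.
 */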
690static u8 update_white_list(struct hci_request *req)
691{
692 struct hci_dev *hdev = req->hdev;
693 struct hci_conn_params *params;
694 struct bdaddr_list *b;
695 uint8_t white_list_entries = 0;
696
697 /* Go through the current white list programmed into the
698 * controller one by one and check if that address is still
699 * in the list of pending connections or list of devices to
700 * report. If not present in either list, then queue the
701 * command to remove it from the controller.
702 */
703 list_for_each_entry(b, &hdev->le_white_list, list) {
704 /* If the device is neither in pend_le_conns nor
705 * pend_le_reports then remove it from the whitelist.
706 */
707 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
708 &b->bdaddr, b->bdaddr_type) &&
709 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
710 &b->bdaddr, b->bdaddr_type)) {
711 struct hci_cp_le_del_from_white_list cp;
712
713 cp.bdaddr_type = b->bdaddr_type;
714 bacpy(&cp.bdaddr, &b->bdaddr);
715
716 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
717 sizeof(cp), &cp);
718 continue;
719 }
720
721 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
722 /* White list can not be used with RPAs */
723 return 0x00;
724 }
725
726 white_list_entries++;
727 }
728
729 /* Since all no longer valid white list entries have been
730 * removed, walk through the list of pending connections
731 * and ensure that any new device gets programmed into
732 * the controller.
733 *
 734	 * If the list of devices is larger than the number of
 735	 * available white list entries in the controller, then
 736	 * just abort and return the filter policy value to not use the
737 * white list.
738 */
739 list_for_each_entry(params, &hdev->pend_le_conns, action) {
740 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
741 &params->addr, params->addr_type))
742 continue;
743
744 if (white_list_entries >= hdev->le_white_list_size) {
745 /* Select filter policy to accept all advertising */
746 return 0x00;
747 }
748
749 if (hci_find_irk_by_addr(hdev, &params->addr,
750 params->addr_type)) {
751 /* White list can not be used with RPAs */
752 return 0x00;
753 }
754
755 white_list_entries++;
756 add_to_white_list(req, params);
757 }
758
759 /* After adding all new pending connections, walk through
760 * the list of pending reports and also add these to the
761 * white list if there is still space.
762 */
763 list_for_each_entry(params, &hdev->pend_le_reports, action) {
764 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
765 &params->addr, params->addr_type))
766 continue;
767
768 if (white_list_entries >= hdev->le_white_list_size) {
769 /* Select filter policy to accept all advertising */
770 return 0x00;
771 }
772
773 if (hci_find_irk_by_addr(hdev, &params->addr,
774 params->addr_type)) {
775 /* White list can not be used with RPAs */
776 return 0x00;
777 }
778
779 white_list_entries++;
780 add_to_white_list(req, params);
781 }
782
783 /* Select filter policy to use white list */
784 return 0x01;
785}
786
787static bool scan_use_rpa(struct hci_dev *hdev)
788{
789 return hci_dev_test_flag(hdev, HCI_PRIVACY);
790}
791
792static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
793 u16 window, u8 own_addr_type, u8 filter_policy)
794{
795 struct hci_dev *hdev = req->hdev;
796
 797	/* Use extended scanning if the Set Extended Scan Parameters and
 798	 * Set Extended Scan Enable commands are supported
799 */
800 if (use_ext_scan(hdev)) {
801 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
802 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
803 struct hci_cp_le_scan_phy_params *phy_params;
804 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
805 u32 plen;
806
807 ext_param_cp = (void *)data;
808 phy_params = (void *)ext_param_cp->data;
809
810 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
811 ext_param_cp->own_addr_type = own_addr_type;
812 ext_param_cp->filter_policy = filter_policy;
813
814 plen = sizeof(*ext_param_cp);
815
816 if (scan_1m(hdev) || scan_2m(hdev)) {
817 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
818
819 memset(phy_params, 0, sizeof(*phy_params));
820 phy_params->type = type;
821 phy_params->interval = cpu_to_le16(interval);
822 phy_params->window = cpu_to_le16(window);
823
824 plen += sizeof(*phy_params);
825 phy_params++;
826 }
827
828 if (scan_coded(hdev)) {
829 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
830
831 memset(phy_params, 0, sizeof(*phy_params));
832 phy_params->type = type;
833 phy_params->interval = cpu_to_le16(interval);
834 phy_params->window = cpu_to_le16(window);
835
836 plen += sizeof(*phy_params);
837 phy_params++;
838 }
839
840 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
841 plen, ext_param_cp);
842
843 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
844 ext_enable_cp.enable = LE_SCAN_ENABLE;
845 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
846
847 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
848 sizeof(ext_enable_cp), &ext_enable_cp);
849 } else {
850 struct hci_cp_le_set_scan_param param_cp;
851 struct hci_cp_le_set_scan_enable enable_cp;
852
853 memset(&param_cp, 0, sizeof(param_cp));
854 param_cp.type = type;
855 param_cp.interval = cpu_to_le16(interval);
856 param_cp.window = cpu_to_le16(window);
857 param_cp.own_address_type = own_addr_type;
858 param_cp.filter_policy = filter_policy;
859 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
860 &param_cp);
861
862 memset(&enable_cp, 0, sizeof(enable_cp));
863 enable_cp.enable = LE_SCAN_ENABLE;
864 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
865 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
866 &enable_cp);
867 }
868}
869
870void hci_req_add_le_passive_scan(struct hci_request *req)
871{
872 struct hci_dev *hdev = req->hdev;
873 u8 own_addr_type;
874 u8 filter_policy;
875
 876	/* Set require_privacy to false since no SCAN_REQ are sent
 877	 * during passive scanning. Not using a non-resolvable address
878 * here is important so that peer devices using direct
879 * advertising with our address will be correctly reported
880 * by the controller.
881 */
882 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
883 &own_addr_type))
884 return;
885
886 /* Adding or removing entries from the white list must
887 * happen before enabling scanning. The controller does
888 * not allow white list modification while scanning.
889 */
890 filter_policy = update_white_list(req);
891
 892	/* When the controller is using random resolvable addresses (that
 893	 * is, LE privacy is enabled), controllers that support Extended
 894	 * Scanner Filter Policies can additionally handle directed
 895	 * advertising.
 896	 *
 897	 * So instead of using filter policies 0x00 (no whitelist)
898 * and 0x01 (whitelist enabled) use the new filter policies
899 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
900 */
901 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
902 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
903 filter_policy |= 0x02;
904
905 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
906 hdev->le_scan_window, own_addr_type, filter_policy);
907}
908
909static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
910{
911 struct adv_info *adv_instance;
912
913 /* Ignore instance 0 */
914 if (instance == 0x00)
915 return 0;
916
917 adv_instance = hci_find_adv_instance(hdev, instance);
918 if (!adv_instance)
919 return 0;
920
921 /* TODO: Take into account the "appearance" and "local-name" flags here.
922 * These are currently being ignored as they are not supported.
923 */
924 return adv_instance->scan_rsp_len;
925}
926
927static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
928{
929 u8 instance = hdev->cur_adv_instance;
930 struct adv_info *adv_instance;
931
932 /* Ignore instance 0 */
933 if (instance == 0x00)
934 return 0;
935
936 adv_instance = hci_find_adv_instance(hdev, instance);
937 if (!adv_instance)
938 return 0;
939
940 /* TODO: Take into account the "appearance" and "local-name" flags here.
941 * These are currently being ignored as they are not supported.
942 */
943 return adv_instance->scan_rsp_len;
944}
945
946void __hci_req_disable_advertising(struct hci_request *req)
947{
948 if (ext_adv_capable(req->hdev)) {
949 struct hci_cp_le_set_ext_adv_enable cp;
950
951 cp.enable = 0x00;
952 /* Disable all sets since we only support one set at the moment */
953 cp.num_of_sets = 0x00;
954
955 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
956 } else {
957 u8 enable = 0x00;
958
959 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
960 }
961}
962
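/* Return the advertising flags for @instance. Instance 0 derives its
 * flags from the global HCI settings; any other instance uses the
 * flags stored with it.
 */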
963static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
964{
965 u32 flags;
966 struct adv_info *adv_instance;
967
968 if (instance == 0x00) {
969 /* Instance 0 always manages the "Tx Power" and "Flags"
970 * fields
971 */
972 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
973
974 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
975 * corresponds to the "connectable" instance flag.
976 */
977 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
978 flags |= MGMT_ADV_FLAG_CONNECTABLE;
979
980 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
981 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
982 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
983 flags |= MGMT_ADV_FLAG_DISCOV;
984
985 return flags;
986 }
987
988 adv_instance = hci_find_adv_instance(hdev, instance);
989
990 /* Return 0 when we got an invalid instance identifier. */
991 if (!adv_instance)
992 return 0;
993
994 return adv_instance->flags;
995}
996
997static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
998{
999 /* If privacy is not enabled don't use RPA */
1000 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1001 return false;
1002
1003 /* If basic privacy mode is enabled use RPA */
1004 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1005 return true;
1006
1007 /* If limited privacy mode is enabled don't use RPA if we're
1008 * both discoverable and bondable.
1009 */
1010 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1011 hci_dev_test_flag(hdev, HCI_BONDABLE))
1012 return false;
1013
1014 /* We're neither bondable nor discoverable in the limited
1015 * privacy mode, therefore use RPA.
1016 */
1017 return true;
1018}
1019
1020static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1021{
1022 /* If there is no connection we are OK to advertise. */
1023 if (hci_conn_num(hdev, LE_LINK) == 0)
1024 return true;
1025
1026 /* Check le_states if there is any connection in slave role. */
1027 if (hdev->conn_hash.le_num_slave > 0) {
1028 /* Slave connection state and non connectable mode bit 20. */
1029 if (!connectable && !(hdev->le_states[2] & 0x10))
1030 return false;
1031
1032 /* Slave connection state and connectable mode bit 38
1033 * and scannable bit 21.
1034 */
1035 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1036 !(hdev->le_states[2] & 0x20)))
1037 return false;
1038 }
1039
1040 /* Check le_states if there is any connection in master role. */
1041 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1042 /* Master connection state and non connectable mode bit 18. */
1043 if (!connectable && !(hdev->le_states[2] & 0x02))
1044 return false;
1045
1046 /* Master connection state and connectable mode bit 35 and
1047 * scannable 19.
1048 */
1049 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1050 !(hdev->le_states[2] & 0x08)))
1051 return false;
1052 }
1053
1054 return true;
1055}
1056
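/* Queue the commands for legacy advertising: pick ADV_IND, ADV_SCAN_IND
 * or ADV_NONCONN_IND, choose the own address type and intervals, then
 * set the advertising parameters and enable advertising.
 */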
1057void __hci_req_enable_advertising(struct hci_request *req)
1058{
1059 struct hci_dev *hdev = req->hdev;
1060 struct hci_cp_le_set_adv_param cp;
1061 u8 own_addr_type, enable = 0x01;
1062 bool connectable;
1063 u16 adv_min_interval, adv_max_interval;
1064 u32 flags;
1065
1066 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1067
1068 /* If the "connectable" instance flag was not set, then choose between
1069 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1070 */
1071 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1072 mgmt_get_connectable(hdev);
1073
1074 if (!is_advertising_allowed(hdev, connectable))
1075 return;
1076
1077 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1078 __hci_req_disable_advertising(req);
1079
1080 /* Clear the HCI_LE_ADV bit temporarily so that the
1081 * hci_update_random_address knows that it's safe to go ahead
1082 * and write a new random address. The flag will be set back on
1083 * as soon as the SET_ADV_ENABLE HCI command completes.
1084 */
1085 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1086
1087 /* Set require_privacy to true only when non-connectable
1088 * advertising is used. In that case it is fine to use a
1089 * non-resolvable private address.
1090 */
1091 if (hci_update_random_address(req, !connectable,
1092 adv_use_rpa(hdev, flags),
1093 &own_addr_type) < 0)
1094 return;
1095
1096 memset(&cp, 0, sizeof(cp));
1097
1098 if (connectable) {
1099 cp.type = LE_ADV_IND;
1100
1101 adv_min_interval = hdev->le_adv_min_interval;
1102 adv_max_interval = hdev->le_adv_max_interval;
1103 } else {
1104 if (get_cur_adv_instance_scan_rsp_len(hdev))
1105 cp.type = LE_ADV_SCAN_IND;
1106 else
1107 cp.type = LE_ADV_NONCONN_IND;
1108
1109 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1110 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1111 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1112 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1113 } else {
1114 adv_min_interval = hdev->le_adv_min_interval;
1115 adv_max_interval = hdev->le_adv_max_interval;
1116 }
1117 }
1118
1119 cp.min_interval = cpu_to_le16(adv_min_interval);
1120 cp.max_interval = cpu_to_le16(adv_max_interval);
1121 cp.own_address_type = own_addr_type;
1122 cp.channel_map = hdev->le_adv_channel_map;
1123
1124 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1125
1126 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1127}
1128
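/* Append the device name to advertising data: the complete name if it
 * fits, otherwise the configured short name, otherwise a truncated copy
 * of the complete name. Returns the new data length.
 */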
1129u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1130{
1131 size_t short_len;
1132 size_t complete_len;
1133
1134 /* no space left for name (+ NULL + type + len) */
1135 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1136 return ad_len;
1137
1138 /* use complete name if present and fits */
1139 complete_len = strlen(hdev->dev_name);
1140 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1141 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1142 hdev->dev_name, complete_len + 1);
1143
1144 /* use short name if present */
1145 short_len = strlen(hdev->short_name);
1146 if (short_len)
1147 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1148 hdev->short_name, short_len + 1);
1149
 1150	/* use shortened full name if present; we already know that the name
 1151	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
1152 */
1153 if (complete_len) {
1154 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1155
1156 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1157 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1158
1159 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1160 sizeof(name));
1161 }
1162
1163 return ad_len;
1164}
1165
1166static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1167{
1168 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1169}
1170
1171static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1172{
1173 u8 scan_rsp_len = 0;
1174
1175 if (hdev->appearance) {
1176 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1177 }
1178
1179 return append_local_name(hdev, ptr, scan_rsp_len);
1180}
1181
1182static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1183 u8 *ptr)
1184{
1185 struct adv_info *adv_instance;
1186 u32 instance_flags;
1187 u8 scan_rsp_len = 0;
1188
1189 adv_instance = hci_find_adv_instance(hdev, instance);
1190 if (!adv_instance)
1191 return 0;
1192
1193 instance_flags = adv_instance->flags;
1194
1195 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1196 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1197 }
1198
1199 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1200 adv_instance->scan_rsp_len);
1201
1202 scan_rsp_len += adv_instance->scan_rsp_len;
1203
1204 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1205 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1206
1207 return scan_rsp_len;
1208}
1209
1210void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1211{
1212 struct hci_dev *hdev = req->hdev;
1213 u8 len;
1214
1215 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1216 return;
1217
1218 if (ext_adv_capable(hdev)) {
1219 struct hci_cp_le_set_ext_scan_rsp_data cp;
1220
1221 memset(&cp, 0, sizeof(cp));
1222
1223 if (instance)
1224 len = create_instance_scan_rsp_data(hdev, instance,
1225 cp.data);
1226 else
1227 len = create_default_scan_rsp_data(hdev, cp.data);
1228
1229 if (hdev->scan_rsp_data_len == len &&
1230 !memcmp(cp.data, hdev->scan_rsp_data, len))
1231 return;
1232
1233 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1234 hdev->scan_rsp_data_len = len;
1235
1236 cp.handle = 0;
1237 cp.length = len;
1238 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1239 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1240
1241 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1242 &cp);
1243 } else {
1244 struct hci_cp_le_set_scan_rsp_data cp;
1245
1246 memset(&cp, 0, sizeof(cp));
1247
1248 if (instance)
1249 len = create_instance_scan_rsp_data(hdev, instance,
1250 cp.data);
1251 else
1252 len = create_default_scan_rsp_data(hdev, cp.data);
1253
1254 if (hdev->scan_rsp_data_len == len &&
1255 !memcmp(cp.data, hdev->scan_rsp_data, len))
1256 return;
1257
1258 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1259 hdev->scan_rsp_data_len = len;
1260
1261 cp.length = len;
1262
1263 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1264 }
1265}
1266
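/* Build the advertising data for @instance into @ptr: the Flags field
 * (unless the instance data already carries one), the instance data
 * itself and, when requested, the Tx Power field. Returns the length of
 * the data written.
 */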
1267static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1268{
1269 struct adv_info *adv_instance = NULL;
1270 u8 ad_len = 0, flags = 0;
1271 u32 instance_flags;
1272
1273 /* Return 0 when the current instance identifier is invalid. */
1274 if (instance) {
1275 adv_instance = hci_find_adv_instance(hdev, instance);
1276 if (!adv_instance)
1277 return 0;
1278 }
1279
1280 instance_flags = get_adv_instance_flags(hdev, instance);
1281
1282 /* If instance already has the flags set skip adding it once
1283 * again.
1284 */
1285 if (adv_instance && eir_get_data(adv_instance->adv_data,
1286 adv_instance->adv_data_len, EIR_FLAGS,
1287 NULL))
1288 goto skip_flags;
1289
1290 /* The Add Advertising command allows userspace to set both the general
1291 * and limited discoverable flags.
1292 */
1293 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1294 flags |= LE_AD_GENERAL;
1295
1296 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1297 flags |= LE_AD_LIMITED;
1298
1299 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1300 flags |= LE_AD_NO_BREDR;
1301
1302 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1303 /* If a discovery flag wasn't provided, simply use the global
1304 * settings.
1305 */
1306 if (!flags)
1307 flags |= mgmt_get_adv_discov_flags(hdev);
1308
1309 /* If flags would still be empty, then there is no need to
 1310	 * include the "Flags" AD field.
1311 */
1312 if (flags) {
1313 ptr[0] = 0x02;
1314 ptr[1] = EIR_FLAGS;
1315 ptr[2] = flags;
1316
1317 ad_len += 3;
1318 ptr += 3;
1319 }
1320 }
1321
1322skip_flags:
1323 if (adv_instance) {
1324 memcpy(ptr, adv_instance->adv_data,
1325 adv_instance->adv_data_len);
1326 ad_len += adv_instance->adv_data_len;
1327 ptr += adv_instance->adv_data_len;
1328 }
1329
1330 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1331 s8 adv_tx_power;
1332
1333 if (ext_adv_capable(hdev)) {
1334 if (adv_instance)
1335 adv_tx_power = adv_instance->tx_power;
1336 else
1337 adv_tx_power = hdev->adv_tx_power;
1338 } else {
1339 adv_tx_power = hdev->adv_tx_power;
1340 }
1341
1342 /* Provide Tx Power only if we can provide a valid value for it */
1343 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1344 ptr[0] = 0x02;
1345 ptr[1] = EIR_TX_POWER;
1346 ptr[2] = (u8)adv_tx_power;
1347
1348 ad_len += 3;
1349 ptr += 3;
1350 }
1351 }
1352
1353 return ad_len;
1354}
1355
1356void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1357{
1358 struct hci_dev *hdev = req->hdev;
1359 u8 len;
1360
1361 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1362 return;
1363
1364 if (ext_adv_capable(hdev)) {
1365 struct hci_cp_le_set_ext_adv_data cp;
1366
1367 memset(&cp, 0, sizeof(cp));
1368
1369 len = create_instance_adv_data(hdev, instance, cp.data);
1370
1371 /* There's nothing to do if the data hasn't changed */
1372 if (hdev->adv_data_len == len &&
1373 memcmp(cp.data, hdev->adv_data, len) == 0)
1374 return;
1375
1376 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1377 hdev->adv_data_len = len;
1378
1379 cp.length = len;
1380 cp.handle = 0;
1381 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1382 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1383
1384 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1385 } else {
1386 struct hci_cp_le_set_adv_data cp;
1387
1388 memset(&cp, 0, sizeof(cp));
1389
1390 len = create_instance_adv_data(hdev, instance, cp.data);
1391
1392 /* There's nothing to do if the data hasn't changed */
1393 if (hdev->adv_data_len == len &&
1394 memcmp(cp.data, hdev->adv_data, len) == 0)
1395 return;
1396
1397 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1398 hdev->adv_data_len = len;
1399
1400 cp.length = len;
1401
1402 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1403 }
1404}
1405
1406int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1407{
1408 struct hci_request req;
1409
1410 hci_req_init(&req, hdev);
1411 __hci_req_update_adv_data(&req, instance);
1412
1413 return hci_req_run(&req, NULL);
1414}
1415
1416static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1417{
1418 BT_DBG("%s status %u", hdev->name, status);
1419}
1420
1421void hci_req_reenable_advertising(struct hci_dev *hdev)
1422{
1423 struct hci_request req;
1424
1425 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1426 list_empty(&hdev->adv_instances))
1427 return;
1428
1429 hci_req_init(&req, hdev);
1430
1431 if (hdev->cur_adv_instance) {
1432 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1433 true);
1434 } else {
1435 if (ext_adv_capable(hdev)) {
1436 __hci_req_start_ext_adv(&req, 0x00);
1437 } else {
1438 __hci_req_update_adv_data(&req, 0x00);
1439 __hci_req_update_scan_rsp_data(&req, 0x00);
1440 __hci_req_enable_advertising(&req);
1441 }
1442 }
1443
1444 hci_req_run(&req, adv_enable_complete);
1445}
1446
1447static void adv_timeout_expire(struct work_struct *work)
1448{
1449 struct hci_dev *hdev = container_of(work, struct hci_dev,
1450 adv_instance_expire.work);
1451
1452 struct hci_request req;
1453 u8 instance;
1454
1455 BT_DBG("%s", hdev->name);
1456
1457 hci_dev_lock(hdev);
1458
1459 hdev->adv_instance_timeout = 0;
1460
1461 instance = hdev->cur_adv_instance;
1462 if (instance == 0x00)
1463 goto unlock;
1464
1465 hci_req_init(&req, hdev);
1466
1467 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1468
1469 if (list_empty(&hdev->adv_instances))
1470 __hci_req_disable_advertising(&req);
1471
1472 hci_req_run(&req, NULL);
1473
1474unlock:
1475 hci_dev_unlock(hdev);
1476}
1477
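/* Work out which own address type (and, for random addresses, which
 * address) an advertising instance should use: an RPA when privacy is
 * in use, a non-resolvable private address when only privacy is
 * required, and the public address otherwise. @rand_addr is left as
 * BDADDR_ANY when no random address needs to be programmed.
 */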
1478int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1479 bool use_rpa, struct adv_info *adv_instance,
1480 u8 *own_addr_type, bdaddr_t *rand_addr)
1481{
1482 int err;
1483
1484 bacpy(rand_addr, BDADDR_ANY);
1485
1486 /* If privacy is enabled use a resolvable private address. If
1487 * current RPA has expired then generate a new one.
1488 */
1489 if (use_rpa) {
1490 int to;
1491
1492 *own_addr_type = ADDR_LE_DEV_RANDOM;
1493
1494 if (adv_instance) {
1495 if (!adv_instance->rpa_expired &&
1496 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1497 return 0;
1498
1499 adv_instance->rpa_expired = false;
1500 } else {
1501 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1502 !bacmp(&hdev->random_addr, &hdev->rpa))
1503 return 0;
1504 }
1505
1506 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1507 if (err < 0) {
1508 BT_ERR("%s failed to generate new RPA", hdev->name);
1509 return err;
1510 }
1511
1512 bacpy(rand_addr, &hdev->rpa);
1513
1514 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1515 if (adv_instance)
1516 queue_delayed_work(hdev->workqueue,
1517 &adv_instance->rpa_expired_cb, to);
1518 else
1519 queue_delayed_work(hdev->workqueue,
1520 &hdev->rpa_expired, to);
1521
1522 return 0;
1523 }
1524
1525 /* In case of required privacy without resolvable private address,
 1526	 * use a non-resolvable private address. This is useful for
1527 * non-connectable advertising.
1528 */
1529 if (require_privacy) {
1530 bdaddr_t nrpa;
1531
1532 while (true) {
1533 /* The non-resolvable private address is generated
 1534	 * from six random bytes with the two most significant
1535 * bits cleared.
1536 */
1537 get_random_bytes(&nrpa, 6);
1538 nrpa.b[5] &= 0x3f;
1539
1540 /* The non-resolvable private address shall not be
1541 * equal to the public address.
1542 */
1543 if (bacmp(&hdev->bdaddr, &nrpa))
1544 break;
1545 }
1546
1547 *own_addr_type = ADDR_LE_DEV_RANDOM;
1548 bacpy(rand_addr, &nrpa);
1549
1550 return 0;
1551 }
1552
1553 /* No privacy so use a public address. */
1554 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1555
1556 return 0;
1557}
1558
1559void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1560{
1561 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1562}
1563
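/* Queue the Set Extended Advertising Parameters command (and, when a
 * random address has to be programmed, Set Advertising Set Random
 * Address) for the given advertising @instance.
 */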
1564int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1565{
1566 struct hci_cp_le_set_ext_adv_params cp;
1567 struct hci_dev *hdev = req->hdev;
1568 bool connectable;
1569 u32 flags;
1570 bdaddr_t random_addr;
1571 u8 own_addr_type;
1572 int err;
1573 struct adv_info *adv_instance;
1574 bool secondary_adv;
 1575	/* In the ext adv set param command the interval is 3 octets */
1576 const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1577
1578 if (instance > 0) {
1579 adv_instance = hci_find_adv_instance(hdev, instance);
1580 if (!adv_instance)
1581 return -EINVAL;
1582 } else {
1583 adv_instance = NULL;
1584 }
1585
1586 flags = get_adv_instance_flags(hdev, instance);
1587
1588 /* If the "connectable" instance flag was not set, then choose between
1589 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1590 */
1591 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1592 mgmt_get_connectable(hdev);
1593
1594 if (!is_advertising_allowed(hdev, connectable))
1595 return -EPERM;
1596
1597 /* Set require_privacy to true only when non-connectable
1598 * advertising is used. In that case it is fine to use a
1599 * non-resolvable private address.
1600 */
1601 err = hci_get_random_address(hdev, !connectable,
1602 adv_use_rpa(hdev, flags), adv_instance,
1603 &own_addr_type, &random_addr);
1604 if (err < 0)
1605 return err;
1606
1607 memset(&cp, 0, sizeof(cp));
1608
1609 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1610 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1611
1612 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1613
1614 if (connectable) {
1615 if (secondary_adv)
1616 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1617 else
1618 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1619 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1620 if (secondary_adv)
1621 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1622 else
1623 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1624 } else {
1625 if (secondary_adv)
1626 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1627 else
1628 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1629 }
1630
1631 cp.own_addr_type = own_addr_type;
1632 cp.channel_map = hdev->le_adv_channel_map;
1633 cp.tx_power = 127;
1634 cp.handle = instance;
1635
1636 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1637 cp.primary_phy = HCI_ADV_PHY_1M;
1638 cp.secondary_phy = HCI_ADV_PHY_2M;
1639 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1640 cp.primary_phy = HCI_ADV_PHY_CODED;
1641 cp.secondary_phy = HCI_ADV_PHY_CODED;
1642 } else {
1643 /* In all other cases use 1M */
1644 cp.primary_phy = HCI_ADV_PHY_1M;
1645 cp.secondary_phy = HCI_ADV_PHY_1M;
1646 }
1647
1648 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1649
1650 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1651 bacmp(&random_addr, BDADDR_ANY)) {
1652 struct hci_cp_le_set_adv_set_rand_addr cp;
1653
1654 /* Check if random address need to be updated */
1655 if (adv_instance) {
1656 if (!bacmp(&random_addr, &adv_instance->random_addr))
1657 return 0;
1658 } else {
1659 if (!bacmp(&random_addr, &hdev->random_addr))
1660 return 0;
1661 }
1662
1663 memset(&cp, 0, sizeof(cp));
1664
1665 cp.handle = 0;
1666 bacpy(&cp.bdaddr, &random_addr);
1667
1668 hci_req_add(req,
1669 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1670 sizeof(cp), &cp);
1671 }
1672
1673 return 0;
1674}
1675
1676int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1677{
1678 struct hci_dev *hdev = req->hdev;
1679 struct hci_cp_le_set_ext_adv_enable *cp;
1680 struct hci_cp_ext_adv_set *adv_set;
1681 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1682 struct adv_info *adv_instance;
1683
1684 if (instance > 0) {
1685 adv_instance = hci_find_adv_instance(hdev, instance);
1686 if (!adv_instance)
1687 return -EINVAL;
1688 } else {
1689 adv_instance = NULL;
1690 }
1691
1692 cp = (void *) data;
1693 adv_set = (void *) cp->data;
1694
1695 memset(cp, 0, sizeof(*cp));
1696
1697 cp->enable = 0x01;
1698 cp->num_of_sets = 0x01;
1699
1700 memset(adv_set, 0, sizeof(*adv_set));
1701
1702 adv_set->handle = instance;
1703
 1704	/* Set the duration per instance since the controller is responsible
 1705	 * for scheduling it.
1706 */
1707 if (adv_instance && adv_instance->duration) {
1708 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1709
1710 /* Time = N * 10 ms */
1711 adv_set->duration = cpu_to_le16(duration / 10);
1712 }
1713
1714 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1715 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1716 data);
1717
1718 return 0;
1719}
1720
1721int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1722{
1723 struct hci_dev *hdev = req->hdev;
1724 int err;
1725
1726 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1727 __hci_req_disable_advertising(req);
1728
1729 err = __hci_req_setup_ext_adv_instance(req, instance);
1730 if (err < 0)
1731 return err;
1732
1733 __hci_req_update_scan_rsp_data(req, instance);
1734 __hci_req_enable_ext_advertising(req, instance);
1735
1736 return 0;
1737}
1738
1739int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1740 bool force)
1741{
1742 struct hci_dev *hdev = req->hdev;
1743 struct adv_info *adv_instance = NULL;
1744 u16 timeout;
1745
1746 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1747 list_empty(&hdev->adv_instances))
1748 return -EPERM;
1749
1750 if (hdev->adv_instance_timeout)
1751 return -EBUSY;
1752
1753 adv_instance = hci_find_adv_instance(hdev, instance);
1754 if (!adv_instance)
1755 return -ENOENT;
1756
1757 /* A zero timeout means unlimited advertising. As long as there is
1758 * only one instance, duration should be ignored. We still set a timeout
1759 * in case further instances are being added later on.
1760 *
1761 * If the remaining lifetime of the instance is more than the duration
1762 * then the timeout corresponds to the duration, otherwise it will be
1763 * reduced to the remaining instance lifetime.
1764 */
1765 if (adv_instance->timeout == 0 ||
1766 adv_instance->duration <= adv_instance->remaining_time)
1767 timeout = adv_instance->duration;
1768 else
1769 timeout = adv_instance->remaining_time;
1770
1771 /* The remaining time is being reduced unless the instance is being
1772 * advertised without time limit.
1773 */
1774 if (adv_instance->timeout)
1775 adv_instance->remaining_time =
1776 adv_instance->remaining_time - timeout;
1777
1778 /* Only use work for scheduling instances with legacy advertising */
1779 if (!ext_adv_capable(hdev)) {
1780 hdev->adv_instance_timeout = timeout;
1781 queue_delayed_work(hdev->req_workqueue,
1782 &hdev->adv_instance_expire,
1783 msecs_to_jiffies(timeout * 1000));
1784 }
1785
1786 /* If we're just re-scheduling the same instance again then do not
1787 * execute any HCI commands. This happens when a single instance is
1788 * being advertised.
1789 */
1790 if (!force && hdev->cur_adv_instance == instance &&
1791 hci_dev_test_flag(hdev, HCI_LE_ADV))
1792 return 0;
1793
1794 hdev->cur_adv_instance = instance;
1795 if (ext_adv_capable(hdev)) {
1796 __hci_req_start_ext_adv(req, instance);
1797 } else {
1798 __hci_req_update_adv_data(req, instance);
1799 __hci_req_update_scan_rsp_data(req, instance);
1800 __hci_req_enable_advertising(req);
1801 }
1802
1803 return 0;
1804}
1805
1806static void cancel_adv_timeout(struct hci_dev *hdev)
1807{
1808 if (hdev->adv_instance_timeout) {
1809 hdev->adv_instance_timeout = 0;
1810 cancel_delayed_work(&hdev->adv_instance_expire);
1811 }
1812}
1813
1814/* For a single instance:
1815 * - force == true: The instance will be removed even when its remaining
1816 * lifetime is not zero.
1817 * - force == false: the instance will be deactivated but kept stored unless
1818 * the remaining lifetime is zero.
1819 *
1820 * For instance == 0x00:
1821 * - force == true: All instances will be removed regardless of their timeout
1822 * setting.
1823 * - force == false: Only instances that have a timeout will be removed.
1824 */
1825void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1826 struct hci_request *req, u8 instance,
1827 bool force)
1828{
1829 struct adv_info *adv_instance, *n, *next_instance = NULL;
1830 int err;
1831 u8 rem_inst;
1832
1833 /* Cancel any timeout concerning the removed instance(s). */
1834 if (!instance || hdev->cur_adv_instance == instance)
1835 cancel_adv_timeout(hdev);
1836
1837 /* Get the next instance to advertise BEFORE we remove
1838 * the current one. This can be the same instance again
1839 * if there is only one instance.
1840 */
1841 if (instance && hdev->cur_adv_instance == instance)
1842 next_instance = hci_get_next_instance(hdev, instance);
1843
1844 if (instance == 0x00) {
1845 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1846 list) {
1847 if (!(force || adv_instance->timeout))
1848 continue;
1849
1850 rem_inst = adv_instance->instance;
1851 err = hci_remove_adv_instance(hdev, rem_inst);
1852 if (!err)
1853 mgmt_advertising_removed(sk, hdev, rem_inst);
1854 }
1855 } else {
1856 adv_instance = hci_find_adv_instance(hdev, instance);
1857
1858 if (force || (adv_instance && adv_instance->timeout &&
1859 !adv_instance->remaining_time)) {
1860 /* Don't advertise a removed instance. */
1861 if (next_instance &&
1862 next_instance->instance == instance)
1863 next_instance = NULL;
1864
1865 err = hci_remove_adv_instance(hdev, instance);
1866 if (!err)
1867 mgmt_advertising_removed(sk, hdev, instance);
1868 }
1869 }
1870
1871 if (!req || !hdev_is_powered(hdev) ||
1872 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1873 return;
1874
1875 if (next_instance)
1876 __hci_req_schedule_adv_instance(req, next_instance->instance,
1877 false);
1878}
1879
1880static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1881{
1882 struct hci_dev *hdev = req->hdev;
1883
1884 /* If we're advertising or initiating an LE connection we can't
1885 * go ahead and change the random address at this time. This is
1886 * because the eventual initiator address used for the
1887 * subsequently created connection will be undefined (some
1888 * controllers use the new address and others the one we had
1889 * when the operation started).
1890 *
1891 * In this kind of scenario skip the update and let the random
1892 * address be updated at the next cycle.
1893 */
1894 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1895 hci_lookup_le_connect(hdev)) {
1896 BT_DBG("Deferring random address update");
1897 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1898 return;
1899 }
1900
1901 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1902}
1903
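/* Select the own address type for scanning, advertising or initiating,
 * and queue HCI_OP_LE_SET_RANDOM_ADDR first when a new random address
 * (RPA, non-resolvable private address or static address) needs to be
 * programmed.
 */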
1904int hci_update_random_address(struct hci_request *req, bool require_privacy,
1905 bool use_rpa, u8 *own_addr_type)
1906{
1907 struct hci_dev *hdev = req->hdev;
1908 int err;
1909
1910 /* If privacy is enabled use a resolvable private address. If
 1911	 * current RPA has expired or there is something other than
1912 * the current RPA in use, then generate a new one.
1913 */
1914 if (use_rpa) {
1915 int to;
1916
1917 *own_addr_type = ADDR_LE_DEV_RANDOM;
1918
1919 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1920 !bacmp(&hdev->random_addr, &hdev->rpa))
1921 return 0;
1922
1923 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1924 if (err < 0) {
1925 bt_dev_err(hdev, "failed to generate new RPA");
1926 return err;
1927 }
1928
1929 set_random_addr(req, &hdev->rpa);
1930
1931 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1932 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1933
1934 return 0;
1935 }
1936
1937 /* In case of required privacy without resolvable private address,
 1938	 * use a non-resolvable private address. This is useful for active
1939 * scanning and non-connectable advertising.
1940 */
1941 if (require_privacy) {
1942 bdaddr_t nrpa;
1943
1944 while (true) {
1945 /* The non-resolvable private address is generated
 1946	 * from six random bytes with the two most significant
1947 * bits cleared.
1948 */
1949 get_random_bytes(&nrpa, 6);
1950 nrpa.b[5] &= 0x3f;
1951
1952 /* The non-resolvable private address shall not be
1953 * equal to the public address.
1954 */
1955 if (bacmp(&hdev->bdaddr, &nrpa))
1956 break;
1957 }
1958
1959 *own_addr_type = ADDR_LE_DEV_RANDOM;
1960 set_random_addr(req, &nrpa);
1961 return 0;
1962 }
1963
1964 /* If forcing static address is in use or there is no public
1965 * address use the static address as random address (but skip
1966 * the HCI command if the current random address is already the
 1967	 * static one).
1968 *
1969 * In case BR/EDR has been disabled on a dual-mode controller
1970 * and a static address has been configured, then use that
1971 * address instead of the public BR/EDR address.
1972 */
1973 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1974 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1975 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1976 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1977 *own_addr_type = ADDR_LE_DEV_RANDOM;
1978 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1979 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1980 &hdev->static_addr);
1981 return 0;
1982 }
1983
1984 /* Neither privacy nor static address is being used so use a
1985 * public address.
1986 */
1987 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1988
1989 return 0;
1990}
1991
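/* Return true if any BR/EDR whitelist entry is without an active
 * connection, in which case page scanning needs to stay enabled so the
 * device can reconnect.
 */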
1992static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1993{
1994 struct bdaddr_list *b;
1995
1996 list_for_each_entry(b, &hdev->whitelist, list) {
1997 struct hci_conn *conn;
1998
1999 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2000 if (!conn)
2001 return true;
2002
2003 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2004 return true;
2005 }
2006
2007 return false;
2008}
2009
2010void __hci_req_update_scan(struct hci_request *req)
2011{
2012 struct hci_dev *hdev = req->hdev;
2013 u8 scan;
2014
2015 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2016 return;
2017
2018 if (!hdev_is_powered(hdev))
2019 return;
2020
2021 if (mgmt_powering_down(hdev))
2022 return;
2023
2024 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2025 disconnected_whitelist_entries(hdev))
2026 scan = SCAN_PAGE;
2027 else
2028 scan = SCAN_DISABLED;
2029
2030 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2031 scan |= SCAN_INQUIRY;
2032
2033 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2034 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2035 return;
2036
2037 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2038}
2039
2040static int update_scan(struct hci_request *req, unsigned long opt)
2041{
2042 hci_dev_lock(req->hdev);
2043 __hci_req_update_scan(req);
2044 hci_dev_unlock(req->hdev);
2045 return 0;
2046}
2047
2048static void scan_update_work(struct work_struct *work)
2049{
2050 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2051
2052 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2053}
2054
2055static int connectable_update(struct hci_request *req, unsigned long opt)
2056{
2057 struct hci_dev *hdev = req->hdev;
2058
2059 hci_dev_lock(hdev);
2060
2061 __hci_req_update_scan(req);
2062
2063 /* If BR/EDR is not enabled and we disable advertising as a
2064 * by-product of disabling connectable, we need to update the
2065 * advertising flags.
2066 */
2067 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2068 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2069
2070 /* Update the advertising parameters if necessary */
2071 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2072 !list_empty(&hdev->adv_instances)) {
2073 if (ext_adv_capable(hdev))
2074 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2075 else
2076 __hci_req_enable_advertising(req);
2077 }
2078
2079 __hci_update_background_scan(req);
2080
2081 hci_dev_unlock(hdev);
2082
2083 return 0;
2084}
2085
2086static void connectable_update_work(struct work_struct *work)
2087{
2088 struct hci_dev *hdev = container_of(work, struct hci_dev,
2089 connectable_update);
2090 u8 status;
2091
2092 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2093 mgmt_set_connectable_complete(hdev, status);
2094}
2095
2096static u8 get_service_classes(struct hci_dev *hdev)
2097{
2098 struct bt_uuid *uuid;
2099 u8 val = 0;
2100
2101 list_for_each_entry(uuid, &hdev->uuids, list)
2102 val |= uuid->svc_hint;
2103
2104 return val;
2105}
2106
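/* The Class of Device is three octets: cod[0] carries the minor device
 * class, cod[1] the major device class and cod[2] the service class
 * bits derived from the registered UUIDs. Bit 5 of cod[1] (bit 13 of
 * the CoD value) is the Limited Discoverable Mode flag set below.
 */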
2107void __hci_req_update_class(struct hci_request *req)
2108{
2109 struct hci_dev *hdev = req->hdev;
2110 u8 cod[3];
2111
2112 BT_DBG("%s", hdev->name);
2113
2114 if (!hdev_is_powered(hdev))
2115 return;
2116
2117 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2118 return;
2119
2120 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2121 return;
2122
2123 cod[0] = hdev->minor_class;
2124 cod[1] = hdev->major_class;
2125 cod[2] = get_service_classes(hdev);
2126
2127 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2128 cod[1] |= 0x20;
2129
2130 if (memcmp(cod, hdev->dev_class, 3) == 0)
2131 return;
2132
2133 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2134}
2135
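/* Program the Inquiry Access Codes the controller responds to. The LAPs
 * are written little endian: 0x9e8b00 is the Limited Inquiry Access
 * Code (LIAC) and 0x9e8b33 is the General Inquiry Access Code (GIAC).
 */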
2136static void write_iac(struct hci_request *req)
2137{
2138 struct hci_dev *hdev = req->hdev;
2139 struct hci_cp_write_current_iac_lap cp;
2140
2141 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2142 return;
2143
2144 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2145 /* Limited discoverable mode */
2146 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2147 cp.iac_lap[0] = 0x00; /* LIAC */
2148 cp.iac_lap[1] = 0x8b;
2149 cp.iac_lap[2] = 0x9e;
2150 cp.iac_lap[3] = 0x33; /* GIAC */
2151 cp.iac_lap[4] = 0x8b;
2152 cp.iac_lap[5] = 0x9e;
2153 } else {
2154 /* General discoverable mode */
2155 cp.num_iac = 1;
2156 cp.iac_lap[0] = 0x33; /* GIAC */
2157 cp.iac_lap[1] = 0x8b;
2158 cp.iac_lap[2] = 0x9e;
2159 }
2160
2161 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2162 (cp.num_iac * 3) + 1, &cp);
2163}
2164
2165static int discoverable_update(struct hci_request *req, unsigned long opt)
2166{
2167 struct hci_dev *hdev = req->hdev;
2168
2169 hci_dev_lock(hdev);
2170
2171 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2172 write_iac(req);
2173 __hci_req_update_scan(req);
2174 __hci_req_update_class(req);
2175 }
2176
2177 /* Advertising instances don't use the global discoverable setting, so
2178 * only update AD if advertising was enabled using Set Advertising.
2179 */
2180 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2181 __hci_req_update_adv_data(req, 0x00);
2182
2183 /* Discoverable mode affects the local advertising
2184 * address in limited privacy mode.
2185 */
2186 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2187 if (ext_adv_capable(hdev))
2188 __hci_req_start_ext_adv(req, 0x00);
2189 else
2190 __hci_req_enable_advertising(req);
2191 }
2192 }
2193
2194 hci_dev_unlock(hdev);
2195
2196 return 0;
2197}
2198
2199static void discoverable_update_work(struct work_struct *work)
2200{
2201 struct hci_dev *hdev = container_of(work, struct hci_dev,
2202 discoverable_update);
2203 u8 status;
2204
2205 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2206 mgmt_set_discoverable_complete(hdev, status);
2207}
2208
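/* Queue the HCI commands needed to tear down a connection based on its
 * current state: disconnect an established link, cancel an outgoing
 * connection attempt, or reject an incoming connection request. For any
 * other state the connection is simply marked as closed.
 */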
2209void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2210 u8 reason)
2211{
2212 switch (conn->state) {
2213 case BT_CONNECTED:
2214 case BT_CONFIG:
2215 if (conn->type == AMP_LINK) {
2216 struct hci_cp_disconn_phy_link cp;
2217
2218 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2219 cp.reason = reason;
2220 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2221 &cp);
2222 } else {
2223 struct hci_cp_disconnect dc;
2224
2225 dc.handle = cpu_to_le16(conn->handle);
2226 dc.reason = reason;
2227 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2228 }
2229
2230 conn->state = BT_DISCONN;
2231
2232 break;
2233 case BT_CONNECT:
2234 if (conn->type == LE_LINK) {
2235 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2236 break;
2237 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2238 0, NULL);
2239 } else if (conn->type == ACL_LINK) {
2240 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2241 break;
2242 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2243 6, &conn->dst);
2244 }
2245 break;
2246 case BT_CONNECT2:
2247 if (conn->type == ACL_LINK) {
2248 struct hci_cp_reject_conn_req rej;
2249
2250 bacpy(&rej.bdaddr, &conn->dst);
2251 rej.reason = reason;
2252
2253 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2254 sizeof(rej), &rej);
2255 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2256 struct hci_cp_reject_sync_conn_req rej;
2257
2258 bacpy(&rej.bdaddr, &conn->dst);
2259
2260 /* SCO rejection has its own limited set of
2261 * allowed error values (0x0D-0x0F) which isn't
2262 * compatible with most values passed to this
2263			 * function. To be safe, hard-code one of the
2264 * values that's suitable for SCO.
2265 */
2266 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2267
2268 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2269 sizeof(rej), &rej);
2270 }
2271 break;
2272 default:
2273 conn->state = BT_CLOSED;
2274 break;
2275 }
2276}
2277
2278static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2279{
2280 if (status)
2281 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2282}
2283
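/* Convenience wrapper that builds a one-shot request around
 * __hci_abort_conn() and runs it asynchronously. -ENODATA (nothing was
 * queued for the current connection state) is not treated as an error.
 */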
2284int hci_abort_conn(struct hci_conn *conn, u8 reason)
2285{
2286 struct hci_request req;
2287 int err;
2288
2289 hci_req_init(&req, conn->hdev);
2290
2291 __hci_abort_conn(&req, conn, reason);
2292
2293 err = hci_req_run(&req, abort_conn_complete);
2294 if (err && err != -ENODATA) {
2295 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2296 return err;
2297 }
2298
2299 return 0;
2300}
2301
2302static int update_bg_scan(struct hci_request *req, unsigned long opt)
2303{
2304 hci_dev_lock(req->hdev);
2305 __hci_update_background_scan(req);
2306 hci_dev_unlock(req->hdev);
2307 return 0;
2308}
2309
2310static void bg_scan_update(struct work_struct *work)
2311{
2312 struct hci_dev *hdev = container_of(work, struct hci_dev,
2313 bg_scan_update);
2314 struct hci_conn *conn;
2315 u8 status;
2316 int err;
2317
2318 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2319 if (!err)
2320 return;
2321
2322 hci_dev_lock(hdev);
2323
2324 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2325 if (conn)
2326 hci_le_conn_failed(conn, status);
2327
2328 hci_dev_unlock(hdev);
2329}
2330
2331static int le_scan_disable(struct hci_request *req, unsigned long opt)
2332{
2333 hci_req_add_le_scan_disable(req);
2334 return 0;
2335}
2336
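/* Start a BR/EDR inquiry. 'opt' carries the inquiry length handed to
 * the HCI Inquiry command (in units of 1.28 seconds). The giac/liac
 * arrays hold the General and Limited Inquiry Access Code LAPs in
 * little endian order.
 */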
2337static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2338{
2339 u8 length = opt;
2340 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2341 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2342 struct hci_cp_inquiry cp;
2343
2344 BT_DBG("%s", req->hdev->name);
2345
2346 hci_dev_lock(req->hdev);
2347 hci_inquiry_cache_flush(req->hdev);
2348 hci_dev_unlock(req->hdev);
2349
2350 memset(&cp, 0, sizeof(cp));
2351
2352 if (req->hdev->discovery.limited)
2353 memcpy(&cp.lap, liac, sizeof(cp.lap));
2354 else
2355 memcpy(&cp.lap, giac, sizeof(cp.lap));
2356
2357 cp.length = length;
2358
2359 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2360
2361 return 0;
2362}
2363
2364static void le_scan_disable_work(struct work_struct *work)
2365{
2366 struct hci_dev *hdev = container_of(work, struct hci_dev,
2367 le_scan_disable.work);
2368 u8 status;
2369
2370 BT_DBG("%s", hdev->name);
2371
2372 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2373 return;
2374
2375 cancel_delayed_work(&hdev->le_scan_restart);
2376
2377 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2378 if (status) {
2379 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2380 status);
2381 return;
2382 }
2383
2384 hdev->discovery.scan_start = 0;
2385
2386	/* If we were running an LE-only scan, change the discovery state.
2387	 * If we were running both LE scanning and BR/EDR inquiry
2388	 * simultaneously and the BR/EDR inquiry has already finished, stop
2389	 * discovery; otherwise the BR/EDR inquiry will stop discovery when
2390	 * it finishes. If we are going to resolve a remote device name, do
2391	 * not change the discovery state.
2392 */
2393
2394 if (hdev->discovery.type == DISCOV_TYPE_LE)
2395 goto discov_stopped;
2396
2397 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2398 return;
2399
2400 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2401 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2402 hdev->discovery.state != DISCOVERY_RESOLVING)
2403 goto discov_stopped;
2404
2405 return;
2406 }
2407
2408 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2409 HCI_CMD_TIMEOUT, &status);
2410 if (status) {
2411 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2412 goto discov_stopped;
2413 }
2414
2415 return;
2416
2417discov_stopped:
2418 hci_dev_lock(hdev);
2419 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2420 hci_dev_unlock(hdev);
2421}
2422
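/* Restart an already-running LE scan by disabling and immediately
 * re-enabling it. This is primarily useful on controllers with a strict
 * duplicate filter, where toggling the scan makes previously reported
 * devices show up again (see le_scan_restart_work() below).
 */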
2423static int le_scan_restart(struct hci_request *req, unsigned long opt)
2424{
2425 struct hci_dev *hdev = req->hdev;
2426
2427 /* If controller is not scanning we are done. */
2428 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2429 return 0;
2430
2431 hci_req_add_le_scan_disable(req);
2432
2433 if (use_ext_scan(hdev)) {
2434 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2435
2436 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2437 ext_enable_cp.enable = LE_SCAN_ENABLE;
2438 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2439
2440 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2441 sizeof(ext_enable_cp), &ext_enable_cp);
2442 } else {
2443 struct hci_cp_le_set_scan_enable cp;
2444
2445 memset(&cp, 0, sizeof(cp));
2446 cp.enable = LE_SCAN_ENABLE;
2447 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2448 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2449 }
2450
2451 return 0;
2452}
2453
2454static void le_scan_restart_work(struct work_struct *work)
2455{
2456 struct hci_dev *hdev = container_of(work, struct hci_dev,
2457 le_scan_restart.work);
2458 unsigned long timeout, duration, scan_start, now;
2459 u8 status;
2460
2461 BT_DBG("%s", hdev->name);
2462
2463 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2464 if (status) {
2465 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2466 status);
2467 return;
2468 }
2469
2470 hci_dev_lock(hdev);
2471
2472 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2473 !hdev->discovery.scan_start)
2474 goto unlock;
2475
2476	/* When the scan was started, hdev->le_scan_disable was queued to
2477	 * run 'duration' after scan_start. During the scan restart that
2478	 * work was cancelled, so queue it again with the proper remaining
2479	 * timeout to make sure the scan does not run indefinitely.
2480 */
2481 duration = hdev->discovery.scan_duration;
2482 scan_start = hdev->discovery.scan_start;
2483 now = jiffies;
2484 if (now - scan_start <= duration) {
2485 int elapsed;
2486
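		/* jiffies may have wrapped around since scan_start */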
2487 if (now >= scan_start)
2488 elapsed = now - scan_start;
2489 else
2490 elapsed = ULONG_MAX - scan_start + now;
2491
2492 timeout = duration - elapsed;
2493 } else {
2494 timeout = 0;
2495 }
2496
2497 queue_delayed_work(hdev->req_workqueue,
2498 &hdev->le_scan_disable, timeout);
2499
2500unlock:
2501 hci_dev_unlock(hdev);
2502}
2503
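/* Set up an active LE scan for discovery: stop advertising if necessary
 * (unless a directed-advertising connection attempt is in progress),
 * pause any background scan, pick a private own address and enable
 * scanning with the interval given in 'opt'.
 */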
2504static int active_scan(struct hci_request *req, unsigned long opt)
2505{
2506 uint16_t interval = opt;
2507 struct hci_dev *hdev = req->hdev;
2508 u8 own_addr_type;
2509 int err;
2510
2511 BT_DBG("%s", hdev->name);
2512
2513 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2514 hci_dev_lock(hdev);
2515
2516 /* Don't let discovery abort an outgoing connection attempt
2517 * that's using directed advertising.
2518 */
2519 if (hci_lookup_le_connect(hdev)) {
2520 hci_dev_unlock(hdev);
2521 return -EBUSY;
2522 }
2523
2524 cancel_adv_timeout(hdev);
2525 hci_dev_unlock(hdev);
2526
2527 __hci_req_disable_advertising(req);
2528 }
2529
2530	/* If the controller is scanning, it means that background scanning is
2531 * running. Thus, we should temporarily stop it in order to set the
2532 * discovery scanning parameters.
2533 */
2534 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2535 hci_req_add_le_scan_disable(req);
2536
2537 /* All active scans will be done with either a resolvable private
2538	 * address (when the privacy feature has been enabled) or a
2539	 * non-resolvable private address.
2540 */
2541 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2542 &own_addr_type);
2543 if (err < 0)
2544 own_addr_type = ADDR_LE_DEV_PUBLIC;
2545
2546 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2547 own_addr_type, 0);
2548 return 0;
2549}
2550
2551static int interleaved_discov(struct hci_request *req, unsigned long opt)
2552{
2553 int err;
2554
2555 BT_DBG("%s", req->hdev->name);
2556
2557 err = active_scan(req, opt);
2558 if (err)
2559 return err;
2560
2561 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2562}
2563
2564static void start_discovery(struct hci_dev *hdev, u8 *status)
2565{
2566 unsigned long timeout;
2567
2568 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2569
2570 switch (hdev->discovery.type) {
2571 case DISCOV_TYPE_BREDR:
2572 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2573 hci_req_sync(hdev, bredr_inquiry,
2574 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2575 status);
2576 return;
2577 case DISCOV_TYPE_INTERLEAVED:
2578 /* When running simultaneous discovery, the LE scanning time
2579		 * should occupy the whole discovery time since BR/EDR inquiry
2580		 * and LE scanning are scheduled by the controller.
2581		 *
2582		 * For interleaved discovery, in comparison, BR/EDR inquiry
2583 * and LE scanning are done sequentially with separate
2584 * timeouts.
2585 */
2586 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2587 &hdev->quirks)) {
2588 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2589 /* During simultaneous discovery, we double LE scan
2590 * interval. We must leave some time for the controller
2591 * to do BR/EDR inquiry.
2592 */
2593 hci_req_sync(hdev, interleaved_discov,
2594 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2595 status);
2596 break;
2597 }
2598
2599 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2600 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2601 HCI_CMD_TIMEOUT, status);
2602 break;
2603 case DISCOV_TYPE_LE:
2604 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2605 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2606 HCI_CMD_TIMEOUT, status);
2607 break;
2608 default:
2609 *status = HCI_ERROR_UNSPECIFIED;
2610 return;
2611 }
2612
2613 if (*status)
2614 return;
2615
2616 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2617
2618 /* When service discovery is used and the controller has a
2619 * strict duplicate filter, it is important to remember the
2620 * start and duration of the scan. This is required for
2621 * restarting scanning during the discovery phase.
2622 */
2623 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2624 hdev->discovery.result_filtering) {
2625 hdev->discovery.scan_start = jiffies;
2626 hdev->discovery.scan_duration = timeout;
2627 }
2628
2629 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2630 timeout);
2631}
2632
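/* Queue the commands needed to stop the current discovery procedure.
 * Returns true if at least one HCI command was added to the request.
 */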
2633bool hci_req_stop_discovery(struct hci_request *req)
2634{
2635 struct hci_dev *hdev = req->hdev;
2636 struct discovery_state *d = &hdev->discovery;
2637 struct hci_cp_remote_name_req_cancel cp;
2638 struct inquiry_entry *e;
2639 bool ret = false;
2640
2641 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2642
2643 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2644 if (test_bit(HCI_INQUIRY, &hdev->flags))
2645 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2646
2647 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2648 cancel_delayed_work(&hdev->le_scan_disable);
2649 hci_req_add_le_scan_disable(req);
2650 }
2651
2652 ret = true;
2653 } else {
2654 /* Passive scanning */
2655 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2656 hci_req_add_le_scan_disable(req);
2657 ret = true;
2658 }
2659 }
2660
2661 /* No further actions needed for LE-only discovery */
2662 if (d->type == DISCOV_TYPE_LE)
2663 return ret;
2664
2665 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2666 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2667 NAME_PENDING);
2668 if (!e)
2669 return ret;
2670
2671 bacpy(&cp.bdaddr, &e->data.bdaddr);
2672 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2673 &cp);
2674 ret = true;
2675 }
2676
2677 return ret;
2678}
2679
2680static int stop_discovery(struct hci_request *req, unsigned long opt)
2681{
2682 hci_dev_lock(req->hdev);
2683 hci_req_stop_discovery(req);
2684 hci_dev_unlock(req->hdev);
2685
2686 return 0;
2687}
2688
2689static void discov_update(struct work_struct *work)
2690{
2691 struct hci_dev *hdev = container_of(work, struct hci_dev,
2692 discov_update);
2693 u8 status = 0;
2694
2695 switch (hdev->discovery.state) {
2696 case DISCOVERY_STARTING:
2697 start_discovery(hdev, &status);
2698 mgmt_start_discovery_complete(hdev, status);
2699 if (status)
2700 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2701 else
2702 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2703 break;
2704 case DISCOVERY_STOPPING:
2705 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2706 mgmt_stop_discovery_complete(hdev, status);
2707 if (!status)
2708 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2709 break;
2710 case DISCOVERY_STOPPED:
2711 default:
2712 return;
2713 }
2714}
2715
2716static void discov_off(struct work_struct *work)
2717{
2718 struct hci_dev *hdev = container_of(work, struct hci_dev,
2719 discov_off.work);
2720
2721 BT_DBG("%s", hdev->name);
2722
2723 hci_dev_lock(hdev);
2724
2725	/* When the discoverable timeout triggers, just make sure the
2726	 * limited discoverable flag is cleared. Even in the case of a
2727	 * timeout triggered from general discoverable mode, it is safe
2728	 * to unconditionally clear the flag.
2729 */
2730 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2731 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2732 hdev->discov_timeout = 0;
2733
2734 hci_dev_unlock(hdev);
2735
2736 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2737 mgmt_new_settings(hdev);
2738}
2739
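/* Bring the controller in sync with the host-configured settings after
 * it has been powered on: SSP/Secure Connections, LE host support,
 * advertising state, link security, fast connectable, scan mode, class
 * of device, local name and EIR data.
 */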
2740static int powered_update_hci(struct hci_request *req, unsigned long opt)
2741{
2742 struct hci_dev *hdev = req->hdev;
2743 u8 link_sec;
2744
2745 hci_dev_lock(hdev);
2746
2747 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2748 !lmp_host_ssp_capable(hdev)) {
2749 u8 mode = 0x01;
2750
2751 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2752
2753 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2754 u8 support = 0x01;
2755
2756 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2757 sizeof(support), &support);
2758 }
2759 }
2760
2761 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2762 lmp_bredr_capable(hdev)) {
2763 struct hci_cp_write_le_host_supported cp;
2764
2765 cp.le = 0x01;
2766 cp.simul = 0x00;
2767
2768 /* Check first if we already have the right
2769 * host state (host features set)
2770 */
2771 if (cp.le != lmp_host_le_capable(hdev) ||
2772 cp.simul != lmp_host_le_br_capable(hdev))
2773 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2774 sizeof(cp), &cp);
2775 }
2776
2777 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2778 /* Make sure the controller has a good default for
2779 * advertising data. This also applies to the case
2780 * where BR/EDR was toggled during the AUTO_OFF phase.
2781 */
2782 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2783 list_empty(&hdev->adv_instances)) {
2784 int err;
2785
2786 if (ext_adv_capable(hdev)) {
2787 err = __hci_req_setup_ext_adv_instance(req,
2788 0x00);
2789 if (!err)
2790 __hci_req_update_scan_rsp_data(req,
2791 0x00);
2792 } else {
2793 err = 0;
2794 __hci_req_update_adv_data(req, 0x00);
2795 __hci_req_update_scan_rsp_data(req, 0x00);
2796 }
2797
2798 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2799 if (!ext_adv_capable(hdev))
2800 __hci_req_enable_advertising(req);
2801 else if (!err)
2802 __hci_req_enable_ext_advertising(req,
2803 0x00);
2804 }
2805 } else if (!list_empty(&hdev->adv_instances)) {
2806 struct adv_info *adv_instance;
2807
2808 adv_instance = list_first_entry(&hdev->adv_instances,
2809 struct adv_info, list);
2810 __hci_req_schedule_adv_instance(req,
2811 adv_instance->instance,
2812 true);
2813 }
2814 }
2815
2816 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2817 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2818 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2819 sizeof(link_sec), &link_sec);
2820
2821 if (lmp_bredr_capable(hdev)) {
2822 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2823 __hci_req_write_fast_connectable(req, true);
2824 else
2825 __hci_req_write_fast_connectable(req, false);
2826 __hci_req_update_scan(req);
2827 __hci_req_update_class(req);
2828 __hci_req_update_name(req);
2829 __hci_req_update_eir(req);
2830 }
2831
2832 hci_dev_unlock(hdev);
2833 return 0;
2834}
2835
2836int __hci_req_hci_power_on(struct hci_dev *hdev)
2837{
2838 /* Register the available SMP channels (BR/EDR and LE) only when
2839 * successfully powering on the controller. This late
2840 * registration is required so that LE SMP can clearly decide if
2841 * the public address or static address is used.
2842 */
2843 smp_register(hdev);
2844
2845 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2846 NULL);
2847}
2848
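/* Initialize the work items owned by this file; the counterpart
 * hci_request_cancel_all() below cancels them all again.
 */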
2849void hci_request_setup(struct hci_dev *hdev)
2850{
2851 INIT_WORK(&hdev->discov_update, discov_update);
2852 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2853 INIT_WORK(&hdev->scan_update, scan_update_work);
2854 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2855 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2856 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2857 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2858 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2859 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2860}
2861
2862void hci_request_cancel_all(struct hci_dev *hdev)
2863{
2864 hci_req_sync_cancel(hdev, ENODEV);
2865
2866 cancel_work_sync(&hdev->discov_update);
2867 cancel_work_sync(&hdev->bg_scan_update);
2868 cancel_work_sync(&hdev->scan_update);
2869 cancel_work_sync(&hdev->connectable_update);
2870 cancel_work_sync(&hdev->discoverable_update);
2871 cancel_delayed_work_sync(&hdev->discov_off);
2872 cancel_delayed_work_sync(&hdev->le_scan_disable);
2873 cancel_delayed_work_sync(&hdev->le_scan_restart);
2874
2875 if (hdev->adv_instance_timeout) {
2876 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2877 hdev->adv_instance_timeout = 0;
2878 }
2879}