/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
23
24#include <linux/sched/signal.h>
25
26#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
28#include <net/bluetooth/mgmt.h>
29
30#include "smp.h"
31#include "hci_request.h"
32
33#define HCI_REQ_DONE 0
34#define HCI_REQ_PEND 1
35#define HCI_REQ_CANCELED 2
36
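/* Initialize a request tracking structure: an empty command queue bound to
 * hdev with no error recorded yet. A typical usage pattern (illustrative
 * sketch; "complete_cb" stands for any hci_req_complete_t callback) is:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, opcode, plen, param);
 *	err = hci_req_run(&req, complete_cb);
 */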
37void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38{
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42}
43
44void hci_req_purge(struct hci_request *req)
45{
46 skb_queue_purge(&req->cmd_q);
47}
48
49bool hci_req_status_pend(struct hci_dev *hdev)
50{
51 return hdev->req_status == HCI_REQ_PEND;
52}
53
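/* Attach the completion callback to the last queued command, splice the
 * request's commands onto the device command queue and schedule the command
 * work to start transmission. Empty requests are rejected with -ENODATA.
 */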
54static int req_run(struct hci_request *req, hci_req_complete_t complete,
55 hci_req_complete_skb_t complete_skb)
56{
57 struct hci_dev *hdev = req->hdev;
58 struct sk_buff *skb;
59 unsigned long flags;
60
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
62
63 /* If an error occurred during request building, remove all HCI
64 * commands queued on the HCI request queue.
65 */
66 if (req->err) {
67 skb_queue_purge(&req->cmd_q);
68 return req->err;
69 }
70
71 /* Do not allow empty requests */
72 if (skb_queue_empty(&req->cmd_q))
73 return -ENODATA;
74
75 skb = skb_peek_tail(&req->cmd_q);
76 if (complete) {
77 bt_cb(skb)->hci.req_complete = complete;
78 } else if (complete_skb) {
79 bt_cb(skb)->hci.req_complete_skb = complete_skb;
80 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 }
82
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
86
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88
89 return 0;
90}
91
92int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
93{
94 return req_run(req, complete, NULL);
95}
96
97int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
98{
99 return req_run(req, NULL, complete);
100}
101
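/* Completion callback shared by the synchronous request helpers: store the
 * result and the response skb in hdev and wake up the waiting caller.
 */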
102static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 struct sk_buff *skb)
104{
105 BT_DBG("%s result 0x%2.2x", hdev->name, result);
106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 if (skb)
111 hdev->req_skb = skb_get(skb);
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114}
115
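/* Abort a pending synchronous request and wake up the waiter with the given
 * error.
 */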
116void hci_req_sync_cancel(struct hci_dev *hdev, int err)
117{
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125}
126
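/* Send a single HCI command, wait (up to the given timeout) for it to
 * complete or for the specific event when 'event' is non-zero, and return
 * the response skb or an ERR_PTR on failure. Since hdev->req_status and
 * hdev->req_skb are used, callers must serialize against other synchronous
 * requests (e.g. via hci_req_sync_lock()).
 */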
127struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
128 const void *param, u8 event, u32 timeout)
129{
130 struct hci_request req;
131 struct sk_buff *skb;
132 int err = 0;
133
134 BT_DBG("%s", hdev->name);
135
136 hci_req_init(&req, hdev);
137
138 hci_req_add_ev(&req, opcode, plen, param, event);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 err = hci_req_run_skb(&req, hci_req_sync_complete);
143 if (err < 0)
144 return ERR_PTR(err);
145
146 err = wait_event_interruptible_timeout(hdev->req_wait_q,
147 hdev->req_status != HCI_REQ_PEND, timeout);
148
149 if (err == -ERESTARTSYS)
150 return ERR_PTR(-EINTR);
151
152 switch (hdev->req_status) {
153 case HCI_REQ_DONE:
154 err = -bt_to_errno(hdev->req_result);
155 break;
156
157 case HCI_REQ_CANCELED:
158 err = -hdev->req_result;
159 break;
160
161 default:
162 err = -ETIMEDOUT;
163 break;
164 }
165
166 hdev->req_status = hdev->req_result = 0;
167 skb = hdev->req_skb;
168 hdev->req_skb = NULL;
169
170 BT_DBG("%s end: err %d", hdev->name, err);
171
172 if (err < 0) {
173 kfree_skb(skb);
174 return ERR_PTR(err);
175 }
176
177 if (!skb)
178 return ERR_PTR(-ENODATA);
179
180 return skb;
181}
182EXPORT_SYMBOL(__hci_cmd_sync_ev);
183
184struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
185 const void *param, u32 timeout)
186{
187 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
188}
189EXPORT_SYMBOL(__hci_cmd_sync);
190
191/* Execute request and wait for completion. */
192int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
193 unsigned long opt),
194 unsigned long opt, u32 timeout, u8 *hci_status)
195{
196 struct hci_request req;
197 int err = 0;
198
199 BT_DBG("%s start", hdev->name);
200
201 hci_req_init(&req, hdev);
202
203 hdev->req_status = HCI_REQ_PEND;
204
205 err = func(&req, opt);
206 if (err) {
207 if (hci_status)
208 *hci_status = HCI_ERROR_UNSPECIFIED;
209 return err;
210 }
211
212 err = hci_req_run_skb(&req, hci_req_sync_complete);
213 if (err < 0) {
214 hdev->req_status = 0;
215
216 /* ENODATA means the HCI request command queue is empty.
217 * This can happen when a request with conditionals doesn't
218 * trigger any commands to be sent. This is normal behavior
219 * and should not trigger an error return.
220 */
221 if (err == -ENODATA) {
222 if (hci_status)
223 *hci_status = 0;
224 return 0;
225 }
226
227 if (hci_status)
228 *hci_status = HCI_ERROR_UNSPECIFIED;
229
230 return err;
231 }
232
233 err = wait_event_interruptible_timeout(hdev->req_wait_q,
234 hdev->req_status != HCI_REQ_PEND, timeout);
235
236 if (err == -ERESTARTSYS)
237 return -EINTR;
238
239 switch (hdev->req_status) {
240 case HCI_REQ_DONE:
241 err = -bt_to_errno(hdev->req_result);
242 if (hci_status)
243 *hci_status = hdev->req_result;
244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 if (hci_status)
249 *hci_status = HCI_ERROR_UNSPECIFIED;
250 break;
251
252 default:
253 err = -ETIMEDOUT;
254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
256 break;
257 }
258
259 kfree_skb(hdev->req_skb);
260 hdev->req_skb = NULL;
261 hdev->req_status = hdev->req_result = 0;
262
263 BT_DBG("%s end: err %d", hdev->name, err);
264
265 return err;
266}
267
268int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
269 unsigned long opt),
270 unsigned long opt, u32 timeout, u8 *hci_status)
271{
272 int ret;
273
274 if (!test_bit(HCI_UP, &hdev->flags))
275 return -ENETDOWN;
276
277 /* Serialize all requests */
278 hci_req_sync_lock(hdev);
279 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
280 hci_req_sync_unlock(hdev);
281
282 return ret;
283}
284
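/* Allocate an skb containing a complete HCI command packet: the command
 * header (opcode and parameter length) followed by the parameters.
 */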
285struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
286 const void *param)
287{
288 int len = HCI_COMMAND_HDR_SIZE + plen;
289 struct hci_command_hdr *hdr;
290 struct sk_buff *skb;
291
292 skb = bt_skb_alloc(len, GFP_ATOMIC);
293 if (!skb)
294 return NULL;
295
296 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
297 hdr->opcode = cpu_to_le16(opcode);
298 hdr->plen = plen;
299
300 if (plen)
301 skb_put_data(skb, param, plen);
302
303 BT_DBG("skb len %d", skb->len);
304
305 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
306 hci_skb_opcode(skb) = opcode;
307
308 return skb;
309}
310
311/* Queue a command to an asynchronous HCI request */
312void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
313 const void *param, u8 event)
314{
315 struct hci_dev *hdev = req->hdev;
316 struct sk_buff *skb;
317
318 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
319
320 /* If an error occurred during request building, there is no point in
321 * queueing the HCI command. We can simply return.
322 */
323 if (req->err)
324 return;
325
326 skb = hci_prepare_cmd(hdev, opcode, plen, param);
327 if (!skb) {
328 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
329 opcode);
330 req->err = -ENOMEM;
331 return;
332 }
333
334 if (skb_queue_empty(&req->cmd_q))
335 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
336
337 bt_cb(skb)->hci.req_event = event;
338
339 skb_queue_tail(&req->cmd_q, skb);
340}
341
342void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
343 const void *param)
344{
345 hci_req_add_ev(req, opcode, plen, param, 0);
346}
347
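/* Queue Write Page Scan Activity and Write Page Scan Type commands to switch
 * between fast connectable mode (interlaced scanning, 160 ms interval) and
 * the standard 1.28 s page scan, skipping commands whose values are already
 * programmed in the controller.
 */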
348void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
349{
350 struct hci_dev *hdev = req->hdev;
351 struct hci_cp_write_page_scan_activity acp;
352 u8 type;
353
354 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
355 return;
356
357 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
358 return;
359
360 if (enable) {
361 type = PAGE_SCAN_TYPE_INTERLACED;
362
363 /* 160 msec page scan interval */
364 acp.interval = cpu_to_le16(0x0100);
365 } else {
366 type = PAGE_SCAN_TYPE_STANDARD; /* default */
367
368 /* default 1.28 sec page scan */
369 acp.interval = cpu_to_le16(0x0800);
370 }
371
372 acp.window = cpu_to_le16(0x0012);
373
374 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
375 __cpu_to_le16(hdev->page_scan_window) != acp.window)
376 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
377 sizeof(acp), &acp);
378
379 if (hdev->page_scan_type != type)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
381}
382
/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
389static void __hci_update_background_scan(struct hci_request *req)
390{
391 struct hci_dev *hdev = req->hdev;
392
393 if (!test_bit(HCI_UP, &hdev->flags) ||
394 test_bit(HCI_INIT, &hdev->flags) ||
395 hci_dev_test_flag(hdev, HCI_SETUP) ||
396 hci_dev_test_flag(hdev, HCI_CONFIG) ||
397 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
398 hci_dev_test_flag(hdev, HCI_UNREGISTER))
399 return;
400
401 /* No point in doing scanning if LE support hasn't been enabled */
402 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
403 return;
404
405 /* If discovery is active don't interfere with it */
406 if (hdev->discovery.state != DISCOVERY_STOPPED)
407 return;
408
	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * the UUID filter list, so it is safe to just reset them here.
	 */
416 hci_discovery_filter_clear(hdev);
417
418 if (list_empty(&hdev->pend_le_conns) &&
419 list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */
424
425 /* If controller is not scanning we are done. */
426 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
427 return;
428
429 hci_req_add_le_scan_disable(req);
430
431 BT_DBG("%s stopping background scanning", hdev->name);
432 } else {
433 /* If there is at least one pending LE connection, we should
434 * keep the background scan running.
435 */
436
437 /* If controller is connecting, we should not start scanning
438 * since some controllers are not able to scan and connect at
439 * the same time.
440 */
441 if (hci_lookup_le_connect(hdev))
442 return;
443
444 /* If controller is currently scanning, we stop it to ensure we
445 * don't miss any advertising (due to duplicates filter).
446 */
447 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
448 hci_req_add_le_scan_disable(req);
449
450 hci_req_add_le_passive_scan(req);
451
452 BT_DBG("%s starting background scanning", hdev->name);
453 }
454}
455
456void __hci_req_update_name(struct hci_request *req)
457{
458 struct hci_dev *hdev = req->hdev;
459 struct hci_cp_write_local_name cp;
460
461 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
462
463 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
464}
465
466#define PNP_INFO_SVCLASS_ID 0x1200
467
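/* Append the list of 16-bit service class UUIDs (EIR_UUID16_ALL, or
 * EIR_UUID16_SOME when the buffer cannot hold them all) to the EIR or
 * advertising data buffer. UUIDs below 0x1100 and the PnP Information
 * service class are not included.
 */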
468static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
469{
470 u8 *ptr = data, *uuids_start = NULL;
471 struct bt_uuid *uuid;
472
473 if (len < 4)
474 return ptr;
475
476 list_for_each_entry(uuid, &hdev->uuids, list) {
477 u16 uuid16;
478
479 if (uuid->size != 16)
480 continue;
481
482 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
483 if (uuid16 < 0x1100)
484 continue;
485
486 if (uuid16 == PNP_INFO_SVCLASS_ID)
487 continue;
488
489 if (!uuids_start) {
490 uuids_start = ptr;
491 uuids_start[0] = 1;
492 uuids_start[1] = EIR_UUID16_ALL;
493 ptr += 2;
494 }
495
496 /* Stop if not enough space to put next UUID */
497 if ((ptr - data) + sizeof(u16) > len) {
498 uuids_start[1] = EIR_UUID16_SOME;
499 break;
500 }
501
502 *ptr++ = (uuid16 & 0x00ff);
503 *ptr++ = (uuid16 & 0xff00) >> 8;
504 uuids_start[0] += sizeof(uuid16);
505 }
506
507 return ptr;
508}
509
510static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
511{
512 u8 *ptr = data, *uuids_start = NULL;
513 struct bt_uuid *uuid;
514
515 if (len < 6)
516 return ptr;
517
518 list_for_each_entry(uuid, &hdev->uuids, list) {
519 if (uuid->size != 32)
520 continue;
521
522 if (!uuids_start) {
523 uuids_start = ptr;
524 uuids_start[0] = 1;
525 uuids_start[1] = EIR_UUID32_ALL;
526 ptr += 2;
527 }
528
529 /* Stop if not enough space to put next UUID */
530 if ((ptr - data) + sizeof(u32) > len) {
531 uuids_start[1] = EIR_UUID32_SOME;
532 break;
533 }
534
535 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
536 ptr += sizeof(u32);
537 uuids_start[0] += sizeof(u32);
538 }
539
540 return ptr;
541}
542
543static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
544{
545 u8 *ptr = data, *uuids_start = NULL;
546 struct bt_uuid *uuid;
547
548 if (len < 18)
549 return ptr;
550
551 list_for_each_entry(uuid, &hdev->uuids, list) {
552 if (uuid->size != 128)
553 continue;
554
555 if (!uuids_start) {
556 uuids_start = ptr;
557 uuids_start[0] = 1;
558 uuids_start[1] = EIR_UUID128_ALL;
559 ptr += 2;
560 }
561
562 /* Stop if not enough space to put next UUID */
563 if ((ptr - data) + 16 > len) {
564 uuids_start[1] = EIR_UUID128_SOME;
565 break;
566 }
567
568 memcpy(ptr, uuid->uuid, 16);
569 ptr += 16;
570 uuids_start[0] += 16;
571 }
572
573 return ptr;
574}
575
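/* Build the extended inquiry response data: local name (shortened to 48
 * bytes if necessary), inquiry TX power, Device ID and the 16-, 32- and
 * 128-bit service class UUID lists.
 */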
576static void create_eir(struct hci_dev *hdev, u8 *data)
577{
578 u8 *ptr = data;
579 size_t name_len;
580
581 name_len = strlen(hdev->dev_name);
582
583 if (name_len > 0) {
584 /* EIR Data type */
585 if (name_len > 48) {
586 name_len = 48;
587 ptr[1] = EIR_NAME_SHORT;
588 } else
589 ptr[1] = EIR_NAME_COMPLETE;
590
591 /* EIR Data length */
592 ptr[0] = name_len + 1;
593
594 memcpy(ptr + 2, hdev->dev_name, name_len);
595
596 ptr += (name_len + 2);
597 }
598
599 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
600 ptr[0] = 2;
601 ptr[1] = EIR_TX_POWER;
602 ptr[2] = (u8) hdev->inq_tx_power;
603
604 ptr += 3;
605 }
606
607 if (hdev->devid_source > 0) {
608 ptr[0] = 9;
609 ptr[1] = EIR_DEVICE_ID;
610
611 put_unaligned_le16(hdev->devid_source, ptr + 2);
612 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
613 put_unaligned_le16(hdev->devid_product, ptr + 6);
614 put_unaligned_le16(hdev->devid_version, ptr + 8);
615
616 ptr += 10;
617 }
618
619 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
620 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
621 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
622}
623
624void __hci_req_update_eir(struct hci_request *req)
625{
626 struct hci_dev *hdev = req->hdev;
627 struct hci_cp_write_eir cp;
628
629 if (!hdev_is_powered(hdev))
630 return;
631
632 if (!lmp_ext_inq_capable(hdev))
633 return;
634
635 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
636 return;
637
638 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
639 return;
640
641 memset(&cp, 0, sizeof(cp));
642
643 create_eir(hdev, cp.data);
644
645 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
646 return;
647
648 memcpy(hdev->eir, cp.data, sizeof(cp.data));
649
650 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
651}
652
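/* Queue the command to stop LE scanning, using the extended scan enable
 * command when the controller supports extended scanning.
 */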
653void hci_req_add_le_scan_disable(struct hci_request *req)
654{
655 struct hci_dev *hdev = req->hdev;
656
657 if (use_ext_scan(hdev)) {
658 struct hci_cp_le_set_ext_scan_enable cp;
659
660 memset(&cp, 0, sizeof(cp));
661 cp.enable = LE_SCAN_DISABLE;
662 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
663 &cp);
664 } else {
665 struct hci_cp_le_set_scan_enable cp;
666
667 memset(&cp, 0, sizeof(cp));
668 cp.enable = LE_SCAN_DISABLE;
669 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
670 }
671}
672
673static void add_to_white_list(struct hci_request *req,
674 struct hci_conn_params *params)
675{
676 struct hci_cp_le_add_to_white_list cp;
677
678 cp.bdaddr_type = params->addr_type;
679 bacpy(&cp.bdaddr, &params->addr);
680
681 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
682}
683
684static u8 update_white_list(struct hci_request *req)
685{
686 struct hci_dev *hdev = req->hdev;
687 struct hci_conn_params *params;
688 struct bdaddr_list *b;
689 uint8_t white_list_entries = 0;
690
691 /* Go through the current white list programmed into the
692 * controller one by one and check if that address is still
693 * in the list of pending connections or list of devices to
694 * report. If not present in either list, then queue the
695 * command to remove it from the controller.
696 */
697 list_for_each_entry(b, &hdev->le_white_list, list) {
698 /* If the device is neither in pend_le_conns nor
699 * pend_le_reports then remove it from the whitelist.
700 */
701 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
702 &b->bdaddr, b->bdaddr_type) &&
703 !hci_pend_le_action_lookup(&hdev->pend_le_reports,
704 &b->bdaddr, b->bdaddr_type)) {
705 struct hci_cp_le_del_from_white_list cp;
706
707 cp.bdaddr_type = b->bdaddr_type;
708 bacpy(&cp.bdaddr, &b->bdaddr);
709
710 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
711 sizeof(cp), &cp);
712 continue;
713 }
714
715 if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
716 /* White list can not be used with RPAs */
717 return 0x00;
718 }
719
720 white_list_entries++;
721 }
722
	/* Since all white list entries that are no longer valid have
	 * been removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does not
	 * use the white list.
	 */
733 list_for_each_entry(params, &hdev->pend_le_conns, action) {
734 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
735 &params->addr, params->addr_type))
736 continue;
737
738 if (white_list_entries >= hdev->le_white_list_size) {
739 /* Select filter policy to accept all advertising */
740 return 0x00;
741 }
742
743 if (hci_find_irk_by_addr(hdev, &params->addr,
744 params->addr_type)) {
745 /* White list can not be used with RPAs */
746 return 0x00;
747 }
748
749 white_list_entries++;
750 add_to_white_list(req, params);
751 }
752
753 /* After adding all new pending connections, walk through
754 * the list of pending reports and also add these to the
755 * white list if there is still space.
756 */
757 list_for_each_entry(params, &hdev->pend_le_reports, action) {
758 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
759 &params->addr, params->addr_type))
760 continue;
761
762 if (white_list_entries >= hdev->le_white_list_size) {
763 /* Select filter policy to accept all advertising */
764 return 0x00;
765 }
766
767 if (hci_find_irk_by_addr(hdev, &params->addr,
768 params->addr_type)) {
769 /* White list can not be used with RPAs */
770 return 0x00;
771 }
772
773 white_list_entries++;
774 add_to_white_list(req, params);
775 }
776
777 /* Select filter policy to use white list */
778 return 0x01;
779}
780
781static bool scan_use_rpa(struct hci_dev *hdev)
782{
783 return hci_dev_test_flag(hdev, HCI_PRIVACY);
784}
785
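/* Queue the LE scan parameter and scan enable commands, using the extended
 * variants (with one parameter entry per supported PHY) when the controller
 * supports extended scanning. Duplicate filtering is always enabled.
 */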
786static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
787 u16 window, u8 own_addr_type, u8 filter_policy)
788{
789 struct hci_dev *hdev = req->hdev;
790
	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
794 if (use_ext_scan(hdev)) {
795 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
796 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
797 struct hci_cp_le_scan_phy_params *phy_params;
798 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
799 u32 plen;
800
801 ext_param_cp = (void *)data;
802 phy_params = (void *)ext_param_cp->data;
803
804 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
805 ext_param_cp->own_addr_type = own_addr_type;
806 ext_param_cp->filter_policy = filter_policy;
807
808 plen = sizeof(*ext_param_cp);
809
810 if (scan_1m(hdev) || scan_2m(hdev)) {
811 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
812
813 memset(phy_params, 0, sizeof(*phy_params));
814 phy_params->type = type;
815 phy_params->interval = cpu_to_le16(interval);
816 phy_params->window = cpu_to_le16(window);
817
818 plen += sizeof(*phy_params);
819 phy_params++;
820 }
821
822 if (scan_coded(hdev)) {
823 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
824
825 memset(phy_params, 0, sizeof(*phy_params));
826 phy_params->type = type;
827 phy_params->interval = cpu_to_le16(interval);
828 phy_params->window = cpu_to_le16(window);
829
830 plen += sizeof(*phy_params);
831 phy_params++;
832 }
833
834 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
835 plen, ext_param_cp);
836
837 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
838 ext_enable_cp.enable = LE_SCAN_ENABLE;
839 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
840
841 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
842 sizeof(ext_enable_cp), &ext_enable_cp);
843 } else {
844 struct hci_cp_le_set_scan_param param_cp;
845 struct hci_cp_le_set_scan_enable enable_cp;
846
847 memset(&param_cp, 0, sizeof(param_cp));
848 param_cp.type = type;
849 param_cp.interval = cpu_to_le16(interval);
850 param_cp.window = cpu_to_le16(window);
851 param_cp.own_address_type = own_addr_type;
852 param_cp.filter_policy = filter_policy;
853 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
854 &param_cp);
855
856 memset(&enable_cp, 0, sizeof(enable_cp));
857 enable_cp.enable = LE_SCAN_ENABLE;
858 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
859 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
860 &enable_cp);
861 }
862}
863
864void hci_req_add_le_passive_scan(struct hci_request *req)
865{
866 struct hci_dev *hdev = req->hdev;
867 u8 own_addr_type;
868 u8 filter_policy;
869
	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using directed
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
876 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
877 &own_addr_type))
878 return;
879
880 /* Adding or removing entries from the white list must
881 * happen before enabling scanning. The controller does
882 * not allow white list modification while scanning.
883 */
884 filter_policy = update_white_list(req);
885
	/* When the controller is using random resolvable addresses and
	 * hence has LE privacy enabled, controllers that support the
	 * Extended Scanner Filter Policies can also handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled), use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
895 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
896 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
897 filter_policy |= 0x02;
898
899 hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
900 hdev->le_scan_window, own_addr_type, filter_policy);
901}
902
903static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
904{
905 struct adv_info *adv_instance;
906
907 /* Ignore instance 0 */
908 if (instance == 0x00)
909 return 0;
910
911 adv_instance = hci_find_adv_instance(hdev, instance);
912 if (!adv_instance)
913 return 0;
914
915 /* TODO: Take into account the "appearance" and "local-name" flags here.
916 * These are currently being ignored as they are not supported.
917 */
918 return adv_instance->scan_rsp_len;
919}
920
921static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
922{
923 u8 instance = hdev->cur_adv_instance;
924 struct adv_info *adv_instance;
925
926 /* Ignore instance 0 */
927 if (instance == 0x00)
928 return 0;
929
930 adv_instance = hci_find_adv_instance(hdev, instance);
931 if (!adv_instance)
932 return 0;
933
934 /* TODO: Take into account the "appearance" and "local-name" flags here.
935 * These are currently being ignored as they are not supported.
936 */
937 return adv_instance->scan_rsp_len;
938}
939
940void __hci_req_disable_advertising(struct hci_request *req)
941{
942 if (ext_adv_capable(req->hdev)) {
943 struct hci_cp_le_set_ext_adv_enable cp;
944
945 cp.enable = 0x00;
946 /* Disable all sets since we only support one set at the moment */
947 cp.num_of_sets = 0x00;
948
949 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
950 } else {
951 u8 enable = 0x00;
952
953 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
954 }
955}
956
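/* Return the advertising flags for the given instance. Instance 0 derives
 * its flags from the global connectable and discoverable settings, while
 * any other instance uses the flags stored with that instance (0 if the
 * instance does not exist).
 */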
957static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
958{
959 u32 flags;
960 struct adv_info *adv_instance;
961
962 if (instance == 0x00) {
963 /* Instance 0 always manages the "Tx Power" and "Flags"
964 * fields
965 */
966 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
967
968 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
969 * corresponds to the "connectable" instance flag.
970 */
971 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
972 flags |= MGMT_ADV_FLAG_CONNECTABLE;
973
974 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
975 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
976 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
977 flags |= MGMT_ADV_FLAG_DISCOV;
978
979 return flags;
980 }
981
982 adv_instance = hci_find_adv_instance(hdev, instance);
983
984 /* Return 0 when we got an invalid instance identifier. */
985 if (!adv_instance)
986 return 0;
987
988 return adv_instance->flags;
989}
990
991static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
992{
993 /* If privacy is not enabled don't use RPA */
994 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
995 return false;
996
997 /* If basic privacy mode is enabled use RPA */
998 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
999 return true;
1000
1001 /* If limited privacy mode is enabled don't use RPA if we're
1002 * both discoverable and bondable.
1003 */
1004 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1005 hci_dev_test_flag(hdev, HCI_BONDABLE))
1006 return false;
1007
1008 /* We're neither bondable nor discoverable in the limited
1009 * privacy mode, therefore use RPA.
1010 */
1011 return true;
1012}
1013
1014static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1015{
1016 /* If there is no connection we are OK to advertise. */
1017 if (hci_conn_num(hdev, LE_LINK) == 0)
1018 return true;
1019
1020 /* Check le_states if there is any connection in slave role. */
1021 if (hdev->conn_hash.le_num_slave > 0) {
1022 /* Slave connection state and non connectable mode bit 20. */
1023 if (!connectable && !(hdev->le_states[2] & 0x10))
1024 return false;
1025
1026 /* Slave connection state and connectable mode bit 38
1027 * and scannable bit 21.
1028 */
1029 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1030 !(hdev->le_states[2] & 0x20)))
1031 return false;
1032 }
1033
1034 /* Check le_states if there is any connection in master role. */
1035 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
1036 /* Master connection state and non connectable mode bit 18. */
1037 if (!connectable && !(hdev->le_states[2] & 0x02))
1038 return false;
1039
1040 /* Master connection state and connectable mode bit 35 and
1041 * scannable 19.
1042 */
1043 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1044 !(hdev->le_states[2] & 0x08)))
1045 return false;
1046 }
1047
1048 return true;
1049}
1050
1051void __hci_req_enable_advertising(struct hci_request *req)
1052{
1053 struct hci_dev *hdev = req->hdev;
1054 struct hci_cp_le_set_adv_param cp;
1055 u8 own_addr_type, enable = 0x01;
1056 bool connectable;
1057 u32 flags;
1058
1059 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
1060
1061 /* If the "connectable" instance flag was not set, then choose between
1062 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1063 */
1064 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1065 mgmt_get_connectable(hdev);
1066
1067 if (!is_advertising_allowed(hdev, connectable))
1068 return;
1069
1070 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1071 __hci_req_disable_advertising(req);
1072
1073 /* Clear the HCI_LE_ADV bit temporarily so that the
1074 * hci_update_random_address knows that it's safe to go ahead
1075 * and write a new random address. The flag will be set back on
1076 * as soon as the SET_ADV_ENABLE HCI command completes.
1077 */
1078 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1079
1080 /* Set require_privacy to true only when non-connectable
1081 * advertising is used. In that case it is fine to use a
1082 * non-resolvable private address.
1083 */
1084 if (hci_update_random_address(req, !connectable,
1085 adv_use_rpa(hdev, flags),
1086 &own_addr_type) < 0)
1087 return;
1088
1089 memset(&cp, 0, sizeof(cp));
1090 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1091 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1092
1093 if (connectable)
1094 cp.type = LE_ADV_IND;
1095 else if (get_cur_adv_instance_scan_rsp_len(hdev))
1096 cp.type = LE_ADV_SCAN_IND;
1097 else
1098 cp.type = LE_ADV_NONCONN_IND;
1099
1100 cp.own_address_type = own_addr_type;
1101 cp.channel_map = hdev->le_adv_channel_map;
1102
1103 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1104
1105 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1106}
1107
1108u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1109{
1110 size_t short_len;
1111 size_t complete_len;
1112
1113 /* no space left for name (+ NULL + type + len) */
1114 if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
1115 return ad_len;
1116
1117 /* use complete name if present and fits */
1118 complete_len = strlen(hdev->dev_name);
1119 if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
1120 return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
1121 hdev->dev_name, complete_len + 1);
1122
1123 /* use short name if present */
1124 short_len = strlen(hdev->short_name);
1125 if (short_len)
1126 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
1127 hdev->short_name, short_len + 1);
1128
	/* Use the shortened full name if present; we already know that
	 * the name is longer than HCI_MAX_SHORT_NAME_LENGTH.
	 */
1132 if (complete_len) {
1133 u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
1134
1135 memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
1136 name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
1137
1138 return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
1139 sizeof(name));
1140 }
1141
1142 return ad_len;
1143}
1144
1145static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
1146{
1147 return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
1148}
1149
1150static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
1151{
1152 u8 scan_rsp_len = 0;
1153
1154 if (hdev->appearance) {
1155 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1156 }
1157
1158 return append_local_name(hdev, ptr, scan_rsp_len);
1159}
1160
1161static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
1162 u8 *ptr)
1163{
1164 struct adv_info *adv_instance;
1165 u32 instance_flags;
1166 u8 scan_rsp_len = 0;
1167
1168 adv_instance = hci_find_adv_instance(hdev, instance);
1169 if (!adv_instance)
1170 return 0;
1171
1172 instance_flags = adv_instance->flags;
1173
1174 if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
1175 scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
1176 }
1177
1178 memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
1179 adv_instance->scan_rsp_len);
1180
1181 scan_rsp_len += adv_instance->scan_rsp_len;
1182
1183 if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
1184 scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
1185
1186 return scan_rsp_len;
1187}
1188
1189void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1190{
1191 struct hci_dev *hdev = req->hdev;
1192 u8 len;
1193
1194 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1195 return;
1196
1197 if (ext_adv_capable(hdev)) {
1198 struct hci_cp_le_set_ext_scan_rsp_data cp;
1199
1200 memset(&cp, 0, sizeof(cp));
1201
1202 if (instance)
1203 len = create_instance_scan_rsp_data(hdev, instance,
1204 cp.data);
1205 else
1206 len = create_default_scan_rsp_data(hdev, cp.data);
1207
1208 if (hdev->scan_rsp_data_len == len &&
1209 !memcmp(cp.data, hdev->scan_rsp_data, len))
1210 return;
1211
1212 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1213 hdev->scan_rsp_data_len = len;
1214
1215 cp.handle = 0;
1216 cp.length = len;
1217 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1218 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1219
1220 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
1221 &cp);
1222 } else {
1223 struct hci_cp_le_set_scan_rsp_data cp;
1224
1225 memset(&cp, 0, sizeof(cp));
1226
1227 if (instance)
1228 len = create_instance_scan_rsp_data(hdev, instance,
1229 cp.data);
1230 else
1231 len = create_default_scan_rsp_data(hdev, cp.data);
1232
1233 if (hdev->scan_rsp_data_len == len &&
1234 !memcmp(cp.data, hdev->scan_rsp_data, len))
1235 return;
1236
1237 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1238 hdev->scan_rsp_data_len = len;
1239
1240 cp.length = len;
1241
1242 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1243 }
1244}
1245
1246static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1247{
1248 struct adv_info *adv_instance = NULL;
1249 u8 ad_len = 0, flags = 0;
1250 u32 instance_flags;
1251
1252 /* Return 0 when the current instance identifier is invalid. */
1253 if (instance) {
1254 adv_instance = hci_find_adv_instance(hdev, instance);
1255 if (!adv_instance)
1256 return 0;
1257 }
1258
1259 instance_flags = get_adv_instance_flags(hdev, instance);
1260
	/* If the instance already has the flags set, skip adding them
	 * once again.
	 */
1264 if (adv_instance && eir_get_data(adv_instance->adv_data,
1265 adv_instance->adv_data_len, EIR_FLAGS,
1266 NULL))
1267 goto skip_flags;
1268
1269 /* The Add Advertising command allows userspace to set both the general
1270 * and limited discoverable flags.
1271 */
1272 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1273 flags |= LE_AD_GENERAL;
1274
1275 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1276 flags |= LE_AD_LIMITED;
1277
1278 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1279 flags |= LE_AD_NO_BREDR;
1280
1281 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1282 /* If a discovery flag wasn't provided, simply use the global
1283 * settings.
1284 */
1285 if (!flags)
1286 flags |= mgmt_get_adv_discov_flags(hdev);
1287
		/* If the flags would still be empty, then there is no need
		 * to include the "Flags" AD field.
		 */
1291 if (flags) {
1292 ptr[0] = 0x02;
1293 ptr[1] = EIR_FLAGS;
1294 ptr[2] = flags;
1295
1296 ad_len += 3;
1297 ptr += 3;
1298 }
1299 }
1300
1301skip_flags:
1302 if (adv_instance) {
1303 memcpy(ptr, adv_instance->adv_data,
1304 adv_instance->adv_data_len);
1305 ad_len += adv_instance->adv_data_len;
1306 ptr += adv_instance->adv_data_len;
1307 }
1308
1309 if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
1310 s8 adv_tx_power;
1311
1312 if (ext_adv_capable(hdev)) {
1313 if (adv_instance)
1314 adv_tx_power = adv_instance->tx_power;
1315 else
1316 adv_tx_power = hdev->adv_tx_power;
1317 } else {
1318 adv_tx_power = hdev->adv_tx_power;
1319 }
1320
1321 /* Provide Tx Power only if we can provide a valid value for it */
1322 if (adv_tx_power != HCI_TX_POWER_INVALID) {
1323 ptr[0] = 0x02;
1324 ptr[1] = EIR_TX_POWER;
1325 ptr[2] = (u8)adv_tx_power;
1326
1327 ad_len += 3;
1328 ptr += 3;
1329 }
1330 }
1331
1332 return ad_len;
1333}
1334
1335void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1336{
1337 struct hci_dev *hdev = req->hdev;
1338 u8 len;
1339
1340 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1341 return;
1342
1343 if (ext_adv_capable(hdev)) {
1344 struct hci_cp_le_set_ext_adv_data cp;
1345
1346 memset(&cp, 0, sizeof(cp));
1347
1348 len = create_instance_adv_data(hdev, instance, cp.data);
1349
1350 /* There's nothing to do if the data hasn't changed */
1351 if (hdev->adv_data_len == len &&
1352 memcmp(cp.data, hdev->adv_data, len) == 0)
1353 return;
1354
1355 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1356 hdev->adv_data_len = len;
1357
1358 cp.length = len;
1359 cp.handle = 0;
1360 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1361 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1362
1363 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1364 } else {
1365 struct hci_cp_le_set_adv_data cp;
1366
1367 memset(&cp, 0, sizeof(cp));
1368
1369 len = create_instance_adv_data(hdev, instance, cp.data);
1370
1371 /* There's nothing to do if the data hasn't changed */
1372 if (hdev->adv_data_len == len &&
1373 memcmp(cp.data, hdev->adv_data, len) == 0)
1374 return;
1375
1376 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1377 hdev->adv_data_len = len;
1378
1379 cp.length = len;
1380
1381 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1382 }
1383}
1384
1385int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1386{
1387 struct hci_request req;
1388
1389 hci_req_init(&req, hdev);
1390 __hci_req_update_adv_data(&req, instance);
1391
1392 return hci_req_run(&req, NULL);
1393}
1394
1395static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1396{
1397 BT_DBG("%s status %u", hdev->name, status);
1398}
1399
1400void hci_req_reenable_advertising(struct hci_dev *hdev)
1401{
1402 struct hci_request req;
1403
1404 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1405 list_empty(&hdev->adv_instances))
1406 return;
1407
1408 hci_req_init(&req, hdev);
1409
1410 if (hdev->cur_adv_instance) {
1411 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1412 true);
1413 } else {
1414 if (ext_adv_capable(hdev)) {
1415 __hci_req_start_ext_adv(&req, 0x00);
1416 } else {
1417 __hci_req_update_adv_data(&req, 0x00);
1418 __hci_req_update_scan_rsp_data(&req, 0x00);
1419 __hci_req_enable_advertising(&req);
1420 }
1421 }
1422
1423 hci_req_run(&req, adv_enable_complete);
1424}
1425
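/* Delayed work run when the advertising duration of the current instance
 * expires: clear the instance (removing it if its remaining lifetime is
 * exhausted) and disable advertising when no instances are left.
 */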
1426static void adv_timeout_expire(struct work_struct *work)
1427{
1428 struct hci_dev *hdev = container_of(work, struct hci_dev,
1429 adv_instance_expire.work);
1430
1431 struct hci_request req;
1432 u8 instance;
1433
1434 BT_DBG("%s", hdev->name);
1435
1436 hci_dev_lock(hdev);
1437
1438 hdev->adv_instance_timeout = 0;
1439
1440 instance = hdev->cur_adv_instance;
1441 if (instance == 0x00)
1442 goto unlock;
1443
1444 hci_req_init(&req, hdev);
1445
1446 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1447
1448 if (list_empty(&hdev->adv_instances))
1449 __hci_req_disable_advertising(&req);
1450
1451 hci_req_run(&req, NULL);
1452
1453unlock:
1454 hci_dev_unlock(hdev);
1455}
1456
1457int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1458 bool use_rpa, struct adv_info *adv_instance,
1459 u8 *own_addr_type, bdaddr_t *rand_addr)
1460{
1461 int err;
1462
1463 bacpy(rand_addr, BDADDR_ANY);
1464
	/* If privacy is enabled, use a resolvable private address. If the
	 * current RPA has expired then generate a new one.
	 */
1468 if (use_rpa) {
1469 int to;
1470
1471 *own_addr_type = ADDR_LE_DEV_RANDOM;
1472
1473 if (adv_instance) {
1474 if (!adv_instance->rpa_expired &&
1475 !bacmp(&adv_instance->random_addr, &hdev->rpa))
1476 return 0;
1477
1478 adv_instance->rpa_expired = false;
1479 } else {
1480 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1481 !bacmp(&hdev->random_addr, &hdev->rpa))
1482 return 0;
1483 }
1484
1485 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1486 if (err < 0) {
1487 BT_ERR("%s failed to generate new RPA", hdev->name);
1488 return err;
1489 }
1490
1491 bacpy(rand_addr, &hdev->rpa);
1492
1493 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1494 if (adv_instance)
1495 queue_delayed_work(hdev->workqueue,
1496 &adv_instance->rpa_expired_cb, to);
1497 else
1498 queue_delayed_work(hdev->workqueue,
1499 &hdev->rpa_expired, to);
1500
1501 return 0;
1502 }
1503
	/* In case privacy is required without a resolvable private
	 * address, use a non-resolvable private address. This is
	 * useful for non-connectable advertising.
	 */
1508 if (require_privacy) {
1509 bdaddr_t nrpa;
1510
1511 while (true) {
1512 /* The non-resolvable private address is generated
1513 * from random six bytes with the two most significant
1514 * bits cleared.
1515 */
1516 get_random_bytes(&nrpa, 6);
1517 nrpa.b[5] &= 0x3f;
1518
1519 /* The non-resolvable private address shall not be
1520 * equal to the public address.
1521 */
1522 if (bacmp(&hdev->bdaddr, &nrpa))
1523 break;
1524 }
1525
1526 *own_addr_type = ADDR_LE_DEV_RANDOM;
1527 bacpy(rand_addr, &nrpa);
1528
1529 return 0;
1530 }
1531
1532 /* No privacy so use a public address. */
1533 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1534
1535 return 0;
1536}
1537
1538void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1539{
1540 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1541}
1542
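/* Queue the LE Set Extended Advertising Parameters command for the given
 * instance, deriving the event properties, PHYs and own address type from
 * the instance flags, and queue Set Advertising Set Random Address when a
 * new random address needs to be programmed.
 */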
1543int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1544{
1545 struct hci_cp_le_set_ext_adv_params cp;
1546 struct hci_dev *hdev = req->hdev;
1547 bool connectable;
1548 u32 flags;
1549 bdaddr_t random_addr;
1550 u8 own_addr_type;
1551 int err;
1552 struct adv_info *adv_instance;
1553 bool secondary_adv;
	/* In the extended advertising parameters command the interval is
	 * a 3-octet value; 0x000800 * 0.625 ms = 1.28 s here.
	 */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1556
1557 if (instance > 0) {
1558 adv_instance = hci_find_adv_instance(hdev, instance);
1559 if (!adv_instance)
1560 return -EINVAL;
1561 } else {
1562 adv_instance = NULL;
1563 }
1564
1565 flags = get_adv_instance_flags(hdev, instance);
1566
1567 /* If the "connectable" instance flag was not set, then choose between
1568 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1569 */
1570 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1571 mgmt_get_connectable(hdev);
1572
1573 if (!is_advertising_allowed(hdev, connectable))
1574 return -EPERM;
1575
1576 /* Set require_privacy to true only when non-connectable
1577 * advertising is used. In that case it is fine to use a
1578 * non-resolvable private address.
1579 */
1580 err = hci_get_random_address(hdev, !connectable,
1581 adv_use_rpa(hdev, flags), adv_instance,
1582 &own_addr_type, &random_addr);
1583 if (err < 0)
1584 return err;
1585
1586 memset(&cp, 0, sizeof(cp));
1587
1588 memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1589 memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1590
1591 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1592
1593 if (connectable) {
1594 if (secondary_adv)
1595 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1596 else
1597 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1598 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1599 if (secondary_adv)
1600 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1601 else
1602 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1603 } else {
1604 if (secondary_adv)
1605 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1606 else
1607 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1608 }
1609
1610 cp.own_addr_type = own_addr_type;
1611 cp.channel_map = hdev->le_adv_channel_map;
1612 cp.tx_power = 127;
1613 cp.handle = 0;
1614
1615 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1616 cp.primary_phy = HCI_ADV_PHY_1M;
1617 cp.secondary_phy = HCI_ADV_PHY_2M;
1618 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1619 cp.primary_phy = HCI_ADV_PHY_CODED;
1620 cp.secondary_phy = HCI_ADV_PHY_CODED;
1621 } else {
1622 /* In all other cases use 1M */
1623 cp.primary_phy = HCI_ADV_PHY_1M;
1624 cp.secondary_phy = HCI_ADV_PHY_1M;
1625 }
1626
1627 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1628
1629 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1630 bacmp(&random_addr, BDADDR_ANY)) {
1631 struct hci_cp_le_set_adv_set_rand_addr cp;
1632
1633 /* Check if random address need to be updated */
1634 if (adv_instance) {
1635 if (!bacmp(&random_addr, &adv_instance->random_addr))
1636 return 0;
1637 } else {
1638 if (!bacmp(&random_addr, &hdev->random_addr))
1639 return 0;
1640 }
1641
1642 memset(&cp, 0, sizeof(cp));
1643
1644 cp.handle = 0;
1645 bacpy(&cp.bdaddr, &random_addr);
1646
1647 hci_req_add(req,
1648 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1649 sizeof(cp), &cp);
1650 }
1651
1652 return 0;
1653}
1654
1655void __hci_req_enable_ext_advertising(struct hci_request *req)
1656{
1657 struct hci_cp_le_set_ext_adv_enable *cp;
1658 struct hci_cp_ext_adv_set *adv_set;
1659 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1660
1661 cp = (void *) data;
1662 adv_set = (void *) cp->data;
1663
1664 memset(cp, 0, sizeof(*cp));
1665
1666 cp->enable = 0x01;
1667 cp->num_of_sets = 0x01;
1668
1669 memset(adv_set, 0, sizeof(*adv_set));
1670
1671 adv_set->handle = 0;
1672
1673 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1674 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1675 data);
1676}
1677
1678int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1679{
1680 struct hci_dev *hdev = req->hdev;
1681 int err;
1682
1683 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1684 __hci_req_disable_advertising(req);
1685
1686 err = __hci_req_setup_ext_adv_instance(req, instance);
1687 if (err < 0)
1688 return err;
1689
1690 __hci_req_update_scan_rsp_data(req, instance);
1691 __hci_req_enable_ext_advertising(req);
1692
1693 return 0;
1694}
1695
1696int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1697 bool force)
1698{
1699 struct hci_dev *hdev = req->hdev;
1700 struct adv_info *adv_instance = NULL;
1701 u16 timeout;
1702
1703 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1704 list_empty(&hdev->adv_instances))
1705 return -EPERM;
1706
1707 if (hdev->adv_instance_timeout)
1708 return -EBUSY;
1709
1710 adv_instance = hci_find_adv_instance(hdev, instance);
1711 if (!adv_instance)
1712 return -ENOENT;
1713
1714 /* A zero timeout means unlimited advertising. As long as there is
1715 * only one instance, duration should be ignored. We still set a timeout
1716 * in case further instances are being added later on.
1717 *
1718 * If the remaining lifetime of the instance is more than the duration
1719 * then the timeout corresponds to the duration, otherwise it will be
1720 * reduced to the remaining instance lifetime.
1721 */
1722 if (adv_instance->timeout == 0 ||
1723 adv_instance->duration <= adv_instance->remaining_time)
1724 timeout = adv_instance->duration;
1725 else
1726 timeout = adv_instance->remaining_time;
1727
1728 /* The remaining time is being reduced unless the instance is being
1729 * advertised without time limit.
1730 */
1731 if (adv_instance->timeout)
1732 adv_instance->remaining_time =
1733 adv_instance->remaining_time - timeout;
1734
1735 hdev->adv_instance_timeout = timeout;
1736 queue_delayed_work(hdev->req_workqueue,
1737 &hdev->adv_instance_expire,
1738 msecs_to_jiffies(timeout * 1000));
1739
1740 /* If we're just re-scheduling the same instance again then do not
1741 * execute any HCI commands. This happens when a single instance is
1742 * being advertised.
1743 */
1744 if (!force && hdev->cur_adv_instance == instance &&
1745 hci_dev_test_flag(hdev, HCI_LE_ADV))
1746 return 0;
1747
1748 hdev->cur_adv_instance = instance;
1749 if (ext_adv_capable(hdev)) {
1750 __hci_req_start_ext_adv(req, instance);
1751 } else {
1752 __hci_req_update_adv_data(req, instance);
1753 __hci_req_update_scan_rsp_data(req, instance);
1754 __hci_req_enable_advertising(req);
1755 }
1756
1757 return 0;
1758}
1759
1760static void cancel_adv_timeout(struct hci_dev *hdev)
1761{
1762 if (hdev->adv_instance_timeout) {
1763 hdev->adv_instance_timeout = 0;
1764 cancel_delayed_work(&hdev->adv_instance_expire);
1765 }
1766}
1767
1768/* For a single instance:
1769 * - force == true: The instance will be removed even when its remaining
1770 * lifetime is not zero.
1771 * - force == false: the instance will be deactivated but kept stored unless
1772 * the remaining lifetime is zero.
1773 *
1774 * For instance == 0x00:
1775 * - force == true: All instances will be removed regardless of their timeout
1776 * setting.
1777 * - force == false: Only instances that have a timeout will be removed.
1778 */
1779void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1780 struct hci_request *req, u8 instance,
1781 bool force)
1782{
1783 struct adv_info *adv_instance, *n, *next_instance = NULL;
1784 int err;
1785 u8 rem_inst;
1786
1787 /* Cancel any timeout concerning the removed instance(s). */
1788 if (!instance || hdev->cur_adv_instance == instance)
1789 cancel_adv_timeout(hdev);
1790
1791 /* Get the next instance to advertise BEFORE we remove
1792 * the current one. This can be the same instance again
1793 * if there is only one instance.
1794 */
1795 if (instance && hdev->cur_adv_instance == instance)
1796 next_instance = hci_get_next_instance(hdev, instance);
1797
1798 if (instance == 0x00) {
1799 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1800 list) {
1801 if (!(force || adv_instance->timeout))
1802 continue;
1803
1804 rem_inst = adv_instance->instance;
1805 err = hci_remove_adv_instance(hdev, rem_inst);
1806 if (!err)
1807 mgmt_advertising_removed(sk, hdev, rem_inst);
1808 }
1809 } else {
1810 adv_instance = hci_find_adv_instance(hdev, instance);
1811
1812 if (force || (adv_instance && adv_instance->timeout &&
1813 !adv_instance->remaining_time)) {
1814 /* Don't advertise a removed instance. */
1815 if (next_instance &&
1816 next_instance->instance == instance)
1817 next_instance = NULL;
1818
1819 err = hci_remove_adv_instance(hdev, instance);
1820 if (!err)
1821 mgmt_advertising_removed(sk, hdev, instance);
1822 }
1823 }
1824
1825 if (!req || !hdev_is_powered(hdev) ||
1826 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1827 return;
1828
1829 if (next_instance)
1830 __hci_req_schedule_adv_instance(req, next_instance->instance,
1831 false);
1832}
1833
1834static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1835{
1836 struct hci_dev *hdev = req->hdev;
1837
1838 /* If we're advertising or initiating an LE connection we can't
1839 * go ahead and change the random address at this time. This is
1840 * because the eventual initiator address used for the
1841 * subsequently created connection will be undefined (some
1842 * controllers use the new address and others the one we had
1843 * when the operation started).
1844 *
1845 * In this kind of scenario skip the update and let the random
1846 * address be updated at the next cycle.
1847 */
1848 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1849 hci_lookup_le_connect(hdev)) {
1850 BT_DBG("Deferring random address update");
1851 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1852 return;
1853 }
1854
1855 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1856}
1857
1858int hci_update_random_address(struct hci_request *req, bool require_privacy,
1859 bool use_rpa, u8 *own_addr_type)
1860{
1861 struct hci_dev *hdev = req->hdev;
1862 int err;
1863
	/* If privacy is enabled, use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
1868 if (use_rpa) {
1869 int to;
1870
1871 *own_addr_type = ADDR_LE_DEV_RANDOM;
1872
1873 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1874 !bacmp(&hdev->random_addr, &hdev->rpa))
1875 return 0;
1876
1877 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1878 if (err < 0) {
1879 bt_dev_err(hdev, "failed to generate new RPA");
1880 return err;
1881 }
1882
1883 set_random_addr(req, &hdev->rpa);
1884
1885 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1886 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1887
1888 return 0;
1889 }
1890
	/* In case privacy is required without a resolvable private
	 * address, use a non-resolvable private address. This is useful
	 * for active scanning and non-connectable advertising.
	 */
1895 if (require_privacy) {
1896 bdaddr_t nrpa;
1897
1898 while (true) {
1899 /* The non-resolvable private address is generated
1900 * from random six bytes with the two most significant
1901 * bits cleared.
1902 */
1903 get_random_bytes(&nrpa, 6);
1904 nrpa.b[5] &= 0x3f;
1905
1906 /* The non-resolvable private address shall not be
1907 * equal to the public address.
1908 */
1909 if (bacmp(&hdev->bdaddr, &nrpa))
1910 break;
1911 }
1912
1913 *own_addr_type = ADDR_LE_DEV_RANDOM;
1914 set_random_addr(req, &nrpa);
1915 return 0;
1916 }
1917
	/* If a forced static address is in use or there is no public
	 * address, use the static address as the random address (but
	 * skip the HCI command if the current random address is already
	 * the static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
1927 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1928 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1929 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1930 bacmp(&hdev->static_addr, BDADDR_ANY))) {
1931 *own_addr_type = ADDR_LE_DEV_RANDOM;
1932 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1933 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1934 &hdev->static_addr);
1935 return 0;
1936 }
1937
1938 /* Neither privacy nor static address is being used so use a
1939 * public address.
1940 */
1941 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1942
1943 return 0;
1944}
1945
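/* Return true if any device on the BR/EDR whitelist is currently not
 * connected, in which case page scanning needs to stay enabled so that
 * such a device can reconnect.
 */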
1946static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1947{
1948 struct bdaddr_list *b;
1949
1950 list_for_each_entry(b, &hdev->whitelist, list) {
1951 struct hci_conn *conn;
1952
1953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1954 if (!conn)
1955 return true;
1956
1957 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1958 return true;
1959 }
1960
1961 return false;
1962}
1963
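/* Recompute the BR/EDR scan mode (page scan and inquiry scan) from the
 * connectable and discoverable settings and queue Write Scan Enable if the
 * currently programmed mode differs.
 */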
1964void __hci_req_update_scan(struct hci_request *req)
1965{
1966 struct hci_dev *hdev = req->hdev;
1967 u8 scan;
1968
1969 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1970 return;
1971
1972 if (!hdev_is_powered(hdev))
1973 return;
1974
1975 if (mgmt_powering_down(hdev))
1976 return;
1977
1978 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1979 disconnected_whitelist_entries(hdev))
1980 scan = SCAN_PAGE;
1981 else
1982 scan = SCAN_DISABLED;
1983
1984 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1985 scan |= SCAN_INQUIRY;
1986
1987 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1988 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1989 return;
1990
1991 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1992}
1993
1994static int update_scan(struct hci_request *req, unsigned long opt)
1995{
1996 hci_dev_lock(req->hdev);
1997 __hci_req_update_scan(req);
1998 hci_dev_unlock(req->hdev);
1999 return 0;
2000}
2001
2002static void scan_update_work(struct work_struct *work)
2003{
2004 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2005
2006 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2007}
2008
2009static int connectable_update(struct hci_request *req, unsigned long opt)
2010{
2011 struct hci_dev *hdev = req->hdev;
2012
2013 hci_dev_lock(hdev);
2014
2015 __hci_req_update_scan(req);
2016
2017 /* If BR/EDR is not enabled and we disable advertising as a
2018 * by-product of disabling connectable, we need to update the
2019 * advertising flags.
2020 */
2021 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2022 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2023
2024 /* Update the advertising parameters if necessary */
2025 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2026 !list_empty(&hdev->adv_instances)) {
2027 if (ext_adv_capable(hdev))
2028 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2029 else
2030 __hci_req_enable_advertising(req);
2031 }
2032
2033 __hci_update_background_scan(req);
2034
2035 hci_dev_unlock(hdev);
2036
2037 return 0;
2038}
2039
2040static void connectable_update_work(struct work_struct *work)
2041{
2042 struct hci_dev *hdev = container_of(work, struct hci_dev,
2043 connectable_update);
2044 u8 status;
2045
2046 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2047 mgmt_set_connectable_complete(hdev, status);
2048}
2049
2050static u8 get_service_classes(struct hci_dev *hdev)
2051{
2052 struct bt_uuid *uuid;
2053 u8 val = 0;
2054
2055 list_for_each_entry(uuid, &hdev->uuids, list)
2056 val |= uuid->svc_hint;
2057
2058 return val;
2059}
2060
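/* Rebuild the Class of Device value: octet 0 carries the minor device
 * class, octet 1 the major device class and octet 2 the service class bits
 * collected from the registered UUID hints. The 0x20 bit set below
 * corresponds to the Limited Discoverable Mode service class bit from the
 * Bluetooth assigned numbers (an interpretation of the hard-coded value,
 * not stated elsewhere in this file).
 */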
2061void __hci_req_update_class(struct hci_request *req)
2062{
2063 struct hci_dev *hdev = req->hdev;
2064 u8 cod[3];
2065
2066 BT_DBG("%s", hdev->name);
2067
2068 if (!hdev_is_powered(hdev))
2069 return;
2070
2071 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2072 return;
2073
2074 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2075 return;
2076
2077 cod[0] = hdev->minor_class;
2078 cod[1] = hdev->major_class;
2079 cod[2] = get_service_classes(hdev);
2080
2081 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2082 cod[1] |= 0x20;
2083
2084 if (memcmp(cod, hdev->dev_class, 3) == 0)
2085 return;
2086
2087 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2088}
2089
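/* Queue a Write Current IAC LAP command so the inquiry access codes match
 * the discoverable mode. The byte triplets below are the LIAC (0x9e8b00)
 * and GIAC (0x9e8b33) LAPs in little-endian order.
 */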
2090static void write_iac(struct hci_request *req)
2091{
2092 struct hci_dev *hdev = req->hdev;
2093 struct hci_cp_write_current_iac_lap cp;
2094
2095 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2096 return;
2097
2098 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2099 /* Limited discoverable mode */
2100 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2101 cp.iac_lap[0] = 0x00; /* LIAC */
2102 cp.iac_lap[1] = 0x8b;
2103 cp.iac_lap[2] = 0x9e;
2104 cp.iac_lap[3] = 0x33; /* GIAC */
2105 cp.iac_lap[4] = 0x8b;
2106 cp.iac_lap[5] = 0x9e;
2107 } else {
2108 /* General discoverable mode */
2109 cp.num_iac = 1;
2110 cp.iac_lap[0] = 0x33; /* GIAC */
2111 cp.iac_lap[1] = 0x8b;
2112 cp.iac_lap[2] = 0x9e;
2113 }
2114
2115 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2116 (cp.num_iac * 3) + 1, &cp);
2117}
2118
2119static int discoverable_update(struct hci_request *req, unsigned long opt)
2120{
2121 struct hci_dev *hdev = req->hdev;
2122
2123 hci_dev_lock(hdev);
2124
2125 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2126 write_iac(req);
2127 __hci_req_update_scan(req);
2128 __hci_req_update_class(req);
2129 }
2130
2131 /* Advertising instances don't use the global discoverable setting, so
2132 * only update AD if advertising was enabled using Set Advertising.
2133 */
2134 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2135 __hci_req_update_adv_data(req, 0x00);
2136
2137 /* Discoverable mode affects the local advertising
2138 * address in limited privacy mode.
2139 */
2140 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2141 if (ext_adv_capable(hdev))
2142 __hci_req_start_ext_adv(req, 0x00);
2143 else
2144 __hci_req_enable_advertising(req);
2145 }
2146 }
2147
2148 hci_dev_unlock(hdev);
2149
2150 return 0;
2151}
2152
2153static void discoverable_update_work(struct work_struct *work)
2154{
2155 struct hci_dev *hdev = container_of(work, struct hci_dev,
2156 discoverable_update);
2157 u8 status;
2158
2159 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2160 mgmt_set_discoverable_complete(hdev, status);
2161}
2162
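/* Queue the HCI command appropriate for tearing down the given connection
 * in its current state (Disconnect, Create Connection Cancel, Reject
 * Connection Request, ...) without running the request; the caller decides
 * when to submit it.
 */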
2163void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2164 u8 reason)
2165{
2166 switch (conn->state) {
2167 case BT_CONNECTED:
2168 case BT_CONFIG:
2169 if (conn->type == AMP_LINK) {
2170 struct hci_cp_disconn_phy_link cp;
2171
2172 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2173 cp.reason = reason;
2174 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2175 &cp);
2176 } else {
2177 struct hci_cp_disconnect dc;
2178
2179 dc.handle = cpu_to_le16(conn->handle);
2180 dc.reason = reason;
2181 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2182 }
2183
2184 conn->state = BT_DISCONN;
2185
2186 break;
2187 case BT_CONNECT:
2188 if (conn->type == LE_LINK) {
2189 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2190 break;
2191 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2192 0, NULL);
2193 } else if (conn->type == ACL_LINK) {
2194 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2195 break;
2196 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2197 6, &conn->dst);
2198 }
2199 break;
2200 case BT_CONNECT2:
2201 if (conn->type == ACL_LINK) {
2202 struct hci_cp_reject_conn_req rej;
2203
2204 bacpy(&rej.bdaddr, &conn->dst);
2205 rej.reason = reason;
2206
2207 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2208 sizeof(rej), &rej);
2209 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2210 struct hci_cp_reject_sync_conn_req rej;
2211
2212 bacpy(&rej.bdaddr, &conn->dst);
2213
2214 /* SCO rejection has its own limited set of
2215 * allowed error values (0x0D-0x0F) which isn't
2216 * compatible with most values passed to this
2217 * function. To be safe, hard-code one of the
2218 * values that's suitable for SCO.
2219 */
2220 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2221
2222 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2223 sizeof(rej), &rej);
2224 }
2225 break;
2226 default:
2227 conn->state = BT_CLOSED;
2228 break;
2229 }
2230}
2231
2232static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2233{
2234 if (status)
2235 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2236}
2237
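/* Convenience wrapper around __hci_abort_conn() that builds and runs a
 * one-shot request. Illustrative example: a caller terminating a link
 * would typically do something like
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * Note that -ENODATA from hci_req_run() (an empty request, e.g. nothing to
 * send for the current connection state) is not treated as an error.
 */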
2238int hci_abort_conn(struct hci_conn *conn, u8 reason)
2239{
2240 struct hci_request req;
2241 int err;
2242
2243 hci_req_init(&req, conn->hdev);
2244
2245 __hci_abort_conn(&req, conn, reason);
2246
2247 err = hci_req_run(&req, abort_conn_complete);
2248 if (err && err != -ENODATA) {
2249 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2250 return err;
2251 }
2252
2253 return 0;
2254}
2255
2256static int update_bg_scan(struct hci_request *req, unsigned long opt)
2257{
2258 hci_dev_lock(req->hdev);
2259 __hci_update_background_scan(req);
2260 hci_dev_unlock(req->hdev);
2261 return 0;
2262}
2263
2264static void bg_scan_update(struct work_struct *work)
2265{
2266 struct hci_dev *hdev = container_of(work, struct hci_dev,
2267 bg_scan_update);
2268 struct hci_conn *conn;
2269 u8 status;
2270 int err;
2271
2272 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2273 if (!err)
2274 return;
2275
2276 hci_dev_lock(hdev);
2277
2278 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2279 if (conn)
2280 hci_le_conn_failed(conn, status);
2281
2282 hci_dev_unlock(hdev);
2283}
2284
2285static int le_scan_disable(struct hci_request *req, unsigned long opt)
2286{
2287 hci_req_add_le_scan_disable(req);
2288 return 0;
2289}
2290
2291static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2292{
2293 u8 length = opt;
2294 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2295 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2296 struct hci_cp_inquiry cp;
2297
2298 BT_DBG("%s", req->hdev->name);
2299
2300 hci_dev_lock(req->hdev);
2301 hci_inquiry_cache_flush(req->hdev);
2302 hci_dev_unlock(req->hdev);
2303
2304 memset(&cp, 0, sizeof(cp));
2305
2306 if (req->hdev->discovery.limited)
2307 memcpy(&cp.lap, liac, sizeof(cp.lap));
2308 else
2309 memcpy(&cp.lap, giac, sizeof(cp.lap));
2310
2311 cp.length = length;
2312
2313 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2314
2315 return 0;
2316}
2317
2318static void le_scan_disable_work(struct work_struct *work)
2319{
2320 struct hci_dev *hdev = container_of(work, struct hci_dev,
2321 le_scan_disable.work);
2322 u8 status;
2323
2324 BT_DBG("%s", hdev->name);
2325
2326 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2327 return;
2328
2329 cancel_delayed_work(&hdev->le_scan_restart);
2330
2331 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2332 if (status) {
2333 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2334 status);
2335 return;
2336 }
2337
2338 hdev->discovery.scan_start = 0;
2339
2340 /* If we were running an LE-only scan, change the discovery state.
2341 * If we were running both LE and BR/EDR inquiry simultaneously
2342 * and BR/EDR inquiry has already finished, stop discovery;
2343 * otherwise BR/EDR inquiry will stop discovery when it finishes.
2344 * If we are going to resolve a remote device name, do not
2345 * change the discovery state.
2346 */
2347
2348 if (hdev->discovery.type == DISCOV_TYPE_LE)
2349 goto discov_stopped;
2350
2351 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2352 return;
2353
2354 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2355 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2356 hdev->discovery.state != DISCOVERY_RESOLVING)
2357 goto discov_stopped;
2358
2359 return;
2360 }
2361
2362 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2363 HCI_CMD_TIMEOUT, &status);
2364 if (status) {
2365 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2366 goto discov_stopped;
2367 }
2368
2369 return;
2370
2371discov_stopped:
2372 hci_dev_lock(hdev);
2373 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2374 hci_dev_unlock(hdev);
2375}
2376
2377static int le_scan_restart(struct hci_request *req, unsigned long opt)
2378{
2379 struct hci_dev *hdev = req->hdev;
2380
2381 /* If controller is not scanning we are done. */
2382 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2383 return 0;
2384
2385 hci_req_add_le_scan_disable(req);
2386
2387 if (use_ext_scan(hdev)) {
2388 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2389
2390 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2391 ext_enable_cp.enable = LE_SCAN_ENABLE;
2392 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2393
2394 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2395 sizeof(ext_enable_cp), &ext_enable_cp);
2396 } else {
2397 struct hci_cp_le_set_scan_enable cp;
2398
2399 memset(&cp, 0, sizeof(cp));
2400 cp.enable = LE_SCAN_ENABLE;
2401 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2402 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2403 }
2404
2405 return 0;
2406}
2407
2408static void le_scan_restart_work(struct work_struct *work)
2409{
2410 struct hci_dev *hdev = container_of(work, struct hci_dev,
2411 le_scan_restart.work);
2412 unsigned long timeout, duration, scan_start, now;
2413 u8 status;
2414
2415 BT_DBG("%s", hdev->name);
2416
2417 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2418 if (status) {
2419 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2420 status);
2421 return;
2422 }
2423
2424 hci_dev_lock(hdev);
2425
2426 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2427 !hdev->discovery.scan_start)
2428 goto unlock;
2429
2430 /* When the scan was started, hdev->le_scan_disable was queued to
2431 * run 'duration' jiffies after scan_start. That work was cancelled
2432 * as part of the scan restart, so queue it again with the remaining
2433 * timeout to make sure the scan does not run indefinitely.
2434 */
2435 duration = hdev->discovery.scan_duration;
2436 scan_start = hdev->discovery.scan_start;
2437 now = jiffies;
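	/* jiffies may have wrapped around since scan_start; account for the
	 * overflow when computing the elapsed time.
	 */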
2438 if (now - scan_start <= duration) {
2439 int elapsed;
2440
2441 if (now >= scan_start)
2442 elapsed = now - scan_start;
2443 else
2444 elapsed = ULONG_MAX - scan_start + now;
2445
2446 timeout = duration - elapsed;
2447 } else {
2448 timeout = 0;
2449 }
2450
2451 queue_delayed_work(hdev->req_workqueue,
2452 &hdev->le_scan_disable, timeout);
2453
2454unlock:
2455 hci_dev_unlock(hdev);
2456}
2457
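/* Request builder for the active LE scan used during discovery. The opt
 * argument carries the scan interval; the scan window is fixed to
 * DISCOV_LE_SCAN_WIN.
 */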
2458static int active_scan(struct hci_request *req, unsigned long opt)
2459{
2460 uint16_t interval = opt;
2461 struct hci_dev *hdev = req->hdev;
2462 u8 own_addr_type;
2463 int err;
2464
2465 BT_DBG("%s", hdev->name);
2466
2467 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2468 hci_dev_lock(hdev);
2469
2470 /* Don't let discovery abort an outgoing connection attempt
2471 * that's using directed advertising.
2472 */
2473 if (hci_lookup_le_connect(hdev)) {
2474 hci_dev_unlock(hdev);
2475 return -EBUSY;
2476 }
2477
2478 cancel_adv_timeout(hdev);
2479 hci_dev_unlock(hdev);
2480
2481 __hci_req_disable_advertising(req);
2482 }
2483
2484 /* If the controller is already scanning, background scanning is in
2485 * progress, so temporarily stop it in order to set the discovery
2486 * scanning parameters.
2487 */
2488 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2489 hci_req_add_le_scan_disable(req);
2490
2491 /* All active scans will be done with either a resolvable private
2492 * address (when the privacy feature has been enabled) or a
2493 * non-resolvable private address.
2494 */
2495 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2496 &own_addr_type);
2497 if (err < 0)
2498 own_addr_type = ADDR_LE_DEV_PUBLIC;
2499
2500 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2501 own_addr_type, 0);
2502 return 0;
2503}
2504
2505static int interleaved_discov(struct hci_request *req, unsigned long opt)
2506{
2507 int err;
2508
2509 BT_DBG("%s", req->hdev->name);
2510
2511 err = active_scan(req, opt);
2512 if (err)
2513 return err;
2514
2515 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2516}
2517
2518static void start_discovery(struct hci_dev *hdev, u8 *status)
2519{
2520 unsigned long timeout;
2521
2522 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2523
2524 switch (hdev->discovery.type) {
2525 case DISCOV_TYPE_BREDR:
2526 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2527 hci_req_sync(hdev, bredr_inquiry,
2528 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2529 status);
2530 return;
2531 case DISCOV_TYPE_INTERLEAVED:
2532 /* When running simultaneous discovery, the LE scanning time
2533 * should occupy the whole discovery time since BR/EDR inquiry
2534 * and LE scanning are scheduled by the controller.
2535 *
2536 * For interleaved discovery, in comparison, BR/EDR inquiry
2537 * and LE scanning are done sequentially with separate
2538 * timeouts.
2539 */
2540 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2541 &hdev->quirks)) {
2542 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2543 /* During simultaneous discovery, we double the LE scan
2544 * interval. We must leave some time for the controller
2545 * to do BR/EDR inquiry.
2546 */
2547 hci_req_sync(hdev, interleaved_discov,
2548 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2549 status);
2550 break;
2551 }
2552
2553 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2554 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2555 HCI_CMD_TIMEOUT, status);
2556 break;
2557 case DISCOV_TYPE_LE:
2558 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2559 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2560 HCI_CMD_TIMEOUT, status);
2561 break;
2562 default:
2563 *status = HCI_ERROR_UNSPECIFIED;
2564 return;
2565 }
2566
2567 if (*status)
2568 return;
2569
2570 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2571
2572 /* When service discovery is used and the controller has a
2573 * strict duplicate filter, it is important to remember the
2574 * start and duration of the scan. This is required for
2575 * restarting scanning during the discovery phase.
2576 */
2577 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2578 hdev->discovery.result_filtering) {
2579 hdev->discovery.scan_start = jiffies;
2580 hdev->discovery.scan_duration = timeout;
2581 }
2582
2583 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2584 timeout);
2585}
2586
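/* Queue the commands needed to stop the current discovery (inquiry cancel,
 * LE scan disable, remote name request cancel). Returns true if at least
 * one command was added to the request.
 */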
2587bool hci_req_stop_discovery(struct hci_request *req)
2588{
2589 struct hci_dev *hdev = req->hdev;
2590 struct discovery_state *d = &hdev->discovery;
2591 struct hci_cp_remote_name_req_cancel cp;
2592 struct inquiry_entry *e;
2593 bool ret = false;
2594
2595 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2596
2597 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2598 if (test_bit(HCI_INQUIRY, &hdev->flags))
2599 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2600
2601 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2602 cancel_delayed_work(&hdev->le_scan_disable);
2603 hci_req_add_le_scan_disable(req);
2604 }
2605
2606 ret = true;
2607 } else {
2608 /* Passive scanning */
2609 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2610 hci_req_add_le_scan_disable(req);
2611 ret = true;
2612 }
2613 }
2614
2615 /* No further actions needed for LE-only discovery */
2616 if (d->type == DISCOV_TYPE_LE)
2617 return ret;
2618
2619 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2620 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2621 NAME_PENDING);
2622 if (!e)
2623 return ret;
2624
2625 bacpy(&cp.bdaddr, &e->data.bdaddr);
2626 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2627 &cp);
2628 ret = true;
2629 }
2630
2631 return ret;
2632}
2633
2634static int stop_discovery(struct hci_request *req, unsigned long opt)
2635{
2636 hci_dev_lock(req->hdev);
2637 hci_req_stop_discovery(req);
2638 hci_dev_unlock(req->hdev);
2639
2640 return 0;
2641}
2642
2643static void discov_update(struct work_struct *work)
2644{
2645 struct hci_dev *hdev = container_of(work, struct hci_dev,
2646 discov_update);
2647 u8 status = 0;
2648
2649 switch (hdev->discovery.state) {
2650 case DISCOVERY_STARTING:
2651 start_discovery(hdev, &status);
2652 mgmt_start_discovery_complete(hdev, status);
2653 if (status)
2654 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2655 else
2656 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2657 break;
2658 case DISCOVERY_STOPPING:
2659 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2660 mgmt_stop_discovery_complete(hdev, status);
2661 if (!status)
2662 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2663 break;
2664 case DISCOVERY_STOPPED:
2665 default:
2666 return;
2667 }
2668}
2669
2670static void discov_off(struct work_struct *work)
2671{
2672 struct hci_dev *hdev = container_of(work, struct hci_dev,
2673 discov_off.work);
2674
2675 BT_DBG("%s", hdev->name);
2676
2677 hci_dev_lock(hdev);
2678
2679 /* When the discoverable timeout triggers, just make sure that
2680 * the limited discoverable flag is cleared. Even in the case
2681 * of a timeout triggered from general discoverable, it is
2682 * safe to unconditionally clear the flag.
2683 */
2684 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2685 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2686 hdev->discov_timeout = 0;
2687
2688 hci_dev_unlock(hdev);
2689
2690 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2691 mgmt_new_settings(hdev);
2692}
2693
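/* Bring the controller configuration in line with the management settings
 * right after powering on: SSP/SC modes, LE host support, advertising data
 * and instances, authentication, fast connectable, scan mode, class of
 * device, name and EIR.
 */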
2694static int powered_update_hci(struct hci_request *req, unsigned long opt)
2695{
2696 struct hci_dev *hdev = req->hdev;
2697 u8 link_sec;
2698
2699 hci_dev_lock(hdev);
2700
2701 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2702 !lmp_host_ssp_capable(hdev)) {
2703 u8 mode = 0x01;
2704
2705 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2706
2707 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2708 u8 support = 0x01;
2709
2710 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2711 sizeof(support), &support);
2712 }
2713 }
2714
2715 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2716 lmp_bredr_capable(hdev)) {
2717 struct hci_cp_write_le_host_supported cp;
2718
2719 cp.le = 0x01;
2720 cp.simul = 0x00;
2721
2722 /* Check first if we already have the right
2723 * host state (host features set)
2724 */
2725 if (cp.le != lmp_host_le_capable(hdev) ||
2726 cp.simul != lmp_host_le_br_capable(hdev))
2727 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2728 sizeof(cp), &cp);
2729 }
2730
2731 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2732 /* Make sure the controller has a good default for
2733 * advertising data. This also applies to the case
2734 * where BR/EDR was toggled during the AUTO_OFF phase.
2735 */
2736 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2737 list_empty(&hdev->adv_instances)) {
2738 int err;
2739
2740 if (ext_adv_capable(hdev)) {
2741 err = __hci_req_setup_ext_adv_instance(req,
2742 0x00);
2743 if (!err)
2744 __hci_req_update_scan_rsp_data(req,
2745 0x00);
2746 } else {
2747 err = 0;
2748 __hci_req_update_adv_data(req, 0x00);
2749 __hci_req_update_scan_rsp_data(req, 0x00);
2750 }
2751
2752 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2753 if (!ext_adv_capable(hdev))
2754 __hci_req_enable_advertising(req);
2755 else if (!err)
2756 __hci_req_enable_ext_advertising(req);
2757 }
2758 } else if (!list_empty(&hdev->adv_instances)) {
2759 struct adv_info *adv_instance;
2760
2761 adv_instance = list_first_entry(&hdev->adv_instances,
2762 struct adv_info, list);
2763 __hci_req_schedule_adv_instance(req,
2764 adv_instance->instance,
2765 true);
2766 }
2767 }
2768
2769 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2770 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2771 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2772 sizeof(link_sec), &link_sec);
2773
2774 if (lmp_bredr_capable(hdev)) {
2775 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2776 __hci_req_write_fast_connectable(req, true);
2777 else
2778 __hci_req_write_fast_connectable(req, false);
2779 __hci_req_update_scan(req);
2780 __hci_req_update_class(req);
2781 __hci_req_update_name(req);
2782 __hci_req_update_eir(req);
2783 }
2784
2785 hci_dev_unlock(hdev);
2786 return 0;
2787}
2788
2789int __hci_req_hci_power_on(struct hci_dev *hdev)
2790{
2791 /* Register the available SMP channels (BR/EDR and LE) only when
2792 * successfully powering on the controller. This late
2793 * registration is required so that LE SMP can clearly decide if
2794 * the public address or static address is used.
2795 */
2796 smp_register(hdev);
2797
2798 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2799 NULL);
2800}
2801
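/* Set up the work items that drive the asynchronous request helpers above;
 * hci_request_cancel_all() below cancels them again, together with any
 * pending synchronous request.
 */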
2802void hci_request_setup(struct hci_dev *hdev)
2803{
2804 INIT_WORK(&hdev->discov_update, discov_update);
2805 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2806 INIT_WORK(&hdev->scan_update, scan_update_work);
2807 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2808 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2809 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2810 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2811 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2812 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2813}
2814
2815void hci_request_cancel_all(struct hci_dev *hdev)
2816{
2817 hci_req_sync_cancel(hdev, ENODEV);
2818
2819 cancel_work_sync(&hdev->discov_update);
2820 cancel_work_sync(&hdev->bg_scan_update);
2821 cancel_work_sync(&hdev->scan_update);
2822 cancel_work_sync(&hdev->connectable_update);
2823 cancel_work_sync(&hdev->discoverable_update);
2824 cancel_delayed_work_sync(&hdev->discov_off);
2825 cancel_delayed_work_sync(&hdev->le_scan_disable);
2826 cancel_delayed_work_sync(&hdev->le_scan_restart);
2827
2828 if (hdev->adv_instance_timeout) {
2829 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2830 hdev->adv_instance_timeout = 0;
2831 }
2832}