/* hci_core.c - HCI core Bluetooth handling */
/*
* Copyright (c) 2017 Nordic Semiconductor ASA
* Copyright (c) 2015-2016 Intel Corporation
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <zephyr.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <sys/atomic.h>
#include <sys/util.h>
#include <sys/slist.h>
#include <sys/byteorder.h>
#include <debug/stack.h>
#include <sys/__assert.h>
#include <soc.h>
#include <settings/settings.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/conn.h>
#include <bluetooth/l2cap.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_vs.h>
#include <drivers/bluetooth/hci_driver.h>
#define BT_DBG_ENABLED IS_ENABLED(CONFIG_BT_DEBUG_HCI_CORE)
#define LOG_MODULE_NAME bt_hci_core
#include "common/log.h"
#include "common/rpa.h"
#include "keys.h"
#include "monitor.h"
#include "hci_core.h"
#include "hci_ecc.h"
#include "ecc.h"
#include "conn_internal.h"
#include "audio/iso_internal.h"
#include "l2cap_internal.h"
#include "gatt_internal.h"
#include "smp.h"
#include "crypto.h"
#include "settings.h"
#if IS_ENABLED(CONFIG_BT_DF)
#include "direction_internal.h"
#endif /* CONFIG_BT_DF */
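/* Without legacy advertising support compiled in, assume the controller
 * supports LE extended advertising, so the extended advertising HCI
 * commands are always used.
 */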
#if !defined(CONFIG_BT_EXT_ADV_LEGACY_SUPPORT)
#undef BT_FEAT_LE_EXT_ADV
#define BT_FEAT_LE_EXT_ADV(feat) 1
#endif
#define RPA_TIMEOUT_MS (CONFIG_BT_RPA_TIMEOUT * MSEC_PER_SEC)
#define RPA_TIMEOUT K_MSEC(RPA_TIMEOUT_MS)
#define HCI_CMD_TIMEOUT K_SECONDS(10)
/* Stacks for the threads */
#if !defined(CONFIG_BT_RECV_IS_RX_THREAD)
static struct k_thread rx_thread_data;
static K_KERNEL_STACK_DEFINE(rx_thread_stack, CONFIG_BT_RX_STACK_SIZE);
#endif
static struct k_thread tx_thread_data;
static K_KERNEL_STACK_DEFINE(tx_thread_stack, CONFIG_BT_HCI_TX_STACK_SIZE);
static void init_work(struct k_work *work);
struct bt_dev bt_dev = {
.init = Z_WORK_INITIALIZER(init_work),
	/* Give ncmd_sem an initial count so that the first HCI_Reset command
	 * can be sent; the only exception is if the controller requests to
	 * wait for an initial Command Complete event for NOP.
	 */
#if !defined(CONFIG_BT_WAIT_NOP)
.ncmd_sem = Z_SEM_INITIALIZER(bt_dev.ncmd_sem, 1, 1),
#else
.ncmd_sem = Z_SEM_INITIALIZER(bt_dev.ncmd_sem, 0, 1),
#endif
.cmd_tx_queue = Z_FIFO_INITIALIZER(bt_dev.cmd_tx_queue),
#if !defined(CONFIG_BT_RECV_IS_RX_THREAD)
.rx_queue = Z_FIFO_INITIALIZER(bt_dev.rx_queue),
#endif
};
static bt_ready_cb_t ready_cb;
static bt_le_scan_cb_t *scan_dev_found_cb;
#if defined(CONFIG_BT_OBSERVER)
static int set_le_scan_enable(uint8_t enable);
static sys_slist_t scan_cbs = SYS_SLIST_STATIC_INIT(&scan_cbs);
#endif /* defined(CONFIG_BT_OBSERVER) */
#if defined(CONFIG_BT_EXT_ADV)
static struct bt_le_ext_adv adv_pool[CONFIG_BT_EXT_ADV_MAX_ADV_SET];
#if defined(CONFIG_BT_PER_ADV_SYNC)
static struct bt_le_per_adv_sync *get_pending_per_adv_sync(void);
static struct bt_le_per_adv_sync per_adv_sync_pool[CONFIG_BT_PER_ADV_SYNC_MAX];
static sys_slist_t pa_sync_cbs = SYS_SLIST_STATIC_INIT(&pa_sync_cbs);
#endif /* defined(CONFIG_BT_PER_ADV_SYNC) */
#endif /* defined(CONFIG_BT_EXT_ADV) */
#if defined(CONFIG_BT_HCI_VS_EVT_USER)
static bt_hci_vnd_evt_cb_t *hci_vnd_evt_cb;
#endif /* CONFIG_BT_HCI_VS_EVT_USER */
#if defined(CONFIG_BT_ECC)
static uint8_t pub_key[64];
static struct bt_pub_key_cb *pub_key_cb;
static bt_dh_key_cb_t dh_key_cb;
#endif /* CONFIG_BT_ECC */
#if defined(CONFIG_BT_BREDR)
static bt_br_discovery_cb_t *discovery_cb;
struct bt_br_discovery_result *discovery_results;
static size_t discovery_results_size;
static size_t discovery_results_count;
#endif /* CONFIG_BT_BREDR */
struct cmd_data {
/** HCI status of the command completion */
uint8_t status;
/** The command OpCode that the buffer contains */
uint16_t opcode;
/** The state to update when command completes with success. */
struct bt_hci_cmd_state_set *state;
/** Used by bt_hci_cmd_send_sync. */
struct k_sem *sync;
};
static struct cmd_data cmd_data[CONFIG_BT_HCI_CMD_COUNT];
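/* Per-buffer metadata accessors: cmd() maps a command buffer to its
 * cmd_data slot via the buffer's pool index, and acl() returns the ACL
 * metadata stored in the buffer's user data area.
 */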
#define cmd(buf) (&cmd_data[net_buf_id(buf)])
#define acl(buf) ((struct acl_data *)net_buf_user_data(buf))
void bt_hci_cmd_data_state_set(struct net_buf *buf,
struct bt_hci_cmd_state_set *state)
{
cmd(buf)->state = state;
}
/* HCI command buffers. Derive the needed size from BT_BUF_RX_SIZE since
* the same buffer is also used for the response.
*/
#define CMD_BUF_SIZE BT_BUF_RX_SIZE
NET_BUF_POOL_FIXED_DEFINE(hci_cmd_pool, CONFIG_BT_HCI_CMD_COUNT,
CMD_BUF_SIZE, NULL);
struct event_handler {
uint8_t event;
uint8_t min_len;
void (*handler)(struct net_buf *buf);
};
#define EVENT_HANDLER(_evt, _handler, _min_len) \
{ \
.event = _evt, \
.handler = _handler, \
.min_len = _min_len, \
}
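/* Dispatch an HCI event to the matching handler in the given table,
 * dropping events that are shorter than the handler's minimum length.
 */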
static inline void handle_event(uint8_t event, struct net_buf *buf,
const struct event_handler *handlers,
size_t num_handlers)
{
size_t i;
for (i = 0; i < num_handlers; i++) {
const struct event_handler *handler = &handlers[i];
if (handler->event != event) {
continue;
}
if (buf->len < handler->min_len) {
BT_ERR("Too small (%u bytes) event 0x%02x",
buf->len, event);
return;
}
handler->handler(buf);
return;
}
BT_WARN("Unhandled event 0x%02x len %u: %s", event,
buf->len, bt_hex(buf->data, buf->len));
}
#if defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL)
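/* Report one completed ACL packet for the buffer's connection handle back
 * to the controller via the Host Number Of Completed Packets command, so
 * that controller-to-host flow control credits are returned when the host
 * frees an RX buffer.
 */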
void bt_hci_host_num_completed_packets(struct net_buf *buf)
{
	struct bt_hci_cp_host_num_completed_packets *cp;
	uint16_t handle = acl(buf)->handle;
	uint8_t index = acl(buf)->index;
	struct bt_hci_handle_count *hc;
	struct bt_conn *conn;
	/* Read the ACL metadata before the buffer is returned to its pool */
	net_buf_destroy(buf);
	/* Do nothing if controller to host flow control is not supported */
	if (!BT_CMD_TEST(bt_dev.supported_commands, 10, 5)) {
		return;
	}
	conn = bt_conn_lookup_index(index);
	if (!conn) {
		BT_WARN("Unable to look up conn with index 0x%02x", index);
		return;
	}
if (!bt_conn_is_handle_valid(conn)) {
BT_WARN("Not reporting packet for non-connected conn");
bt_conn_unref(conn);
return;
}
bt_conn_unref(conn);
BT_DBG("Reporting completed packet for handle %u", handle);
buf = bt_hci_cmd_create(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS,
sizeof(*cp) + sizeof(*hc));
if (!buf) {
BT_ERR("Unable to allocate new HCI command");
return;
}
cp = net_buf_add(buf, sizeof(*cp));
cp->num_handles = sys_cpu_to_le16(1);
hc = net_buf_add(buf, sizeof(*hc));
hc->handle = sys_cpu_to_le16(handle);
hc->count = sys_cpu_to_le16(1);
bt_hci_cmd_send(BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS, buf);
}
#endif /* defined(CONFIG_BT_HCI_ACL_FLOW_CONTROL) */
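/* Allocate a command buffer from hci_cmd_pool and pre-fill the HCI command
 * header. The caller appends param_len bytes of parameters before passing
 * the buffer to bt_hci_cmd_send() or bt_hci_cmd_send_sync().
 */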
struct net_buf *bt_hci_cmd_create(uint16_t opcode, uint8_t param_len)
{
struct bt_hci_cmd_hdr *hdr;
struct net_buf *buf;
BT_DBG("opcode 0x%04x param_len %u", opcode, param_len);
buf = net_buf_alloc(&hci_cmd_pool, K_FOREVER);
__ASSERT_NO_MSG(buf);
BT_DBG("buf %p", buf);
net_buf_reserve(buf, BT_BUF_RESERVE);
bt_buf_set_type(buf, BT_BUF_CMD);
cmd(buf)->opcode = opcode;
cmd(buf)->sync = NULL;
cmd(buf)->state = NULL;
hdr = net_buf_add(buf, sizeof(*hdr));
hdr->opcode = sys_cpu_to_le16(opcode);
hdr->param_len = param_len;
return buf;
}
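/* Queue a command for the TX thread. Host Number Of Completed Packets is
 * sent directly to the driver since it ignores the command credit count
 * and generates no Command Complete/Status event.
 */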
int bt_hci_cmd_send(uint16_t opcode, struct net_buf *buf)
{
if (!buf) {
buf = bt_hci_cmd_create(opcode, 0);
if (!buf) {
return -ENOBUFS;
}
}
BT_DBG("opcode 0x%04x len %u", opcode, buf->len);
/* Host Number of Completed Packets can ignore the ncmd value
* and does not generate any cmd complete/status events.
*/
if (opcode == BT_HCI_OP_HOST_NUM_COMPLETED_PACKETS) {
int err;
err = bt_send(buf);
if (err) {
BT_ERR("Unable to send to driver (err %d)", err);
net_buf_unref(buf);
}
return err;
}
net_buf_put(&bt_dev.cmd_tx_queue, buf);
return 0;
}
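/* Send a command and block until its Command Complete/Status event arrives.
 * On success the response buffer is returned through rsp (if non-NULL) and
 * must be unreferenced by the caller.
 *
 * Illustrative usage sketch (opcode and parameters are only an example,
 * mirroring set_random_address() below):
 *
 *	buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, sizeof(*addr));
 *	net_buf_add_mem(buf, addr, sizeof(*addr));
 *	err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, buf, &rsp);
 *	if (!err) {
 *		net_buf_unref(rsp);
 *	}
 */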
int bt_hci_cmd_send_sync(uint16_t opcode, struct net_buf *buf,
struct net_buf **rsp)
{
struct k_sem sync_sem;
uint8_t status;
int err;
if (!buf) {
buf = bt_hci_cmd_create(opcode, 0);
if (!buf) {
return -ENOBUFS;
}
}
BT_DBG("buf %p opcode 0x%04x len %u", buf, opcode, buf->len);
k_sem_init(&sync_sem, 0, 1);
cmd(buf)->sync = &sync_sem;
/* Make sure the buffer stays around until the command completes */
net_buf_ref(buf);
net_buf_put(&bt_dev.cmd_tx_queue, buf);
err = k_sem_take(&sync_sem, HCI_CMD_TIMEOUT);
BT_ASSERT_MSG(err == 0, "k_sem_take failed with err %d", err);
status = cmd(buf)->status;
if (status) {
BT_WARN("opcode 0x%04x status 0x%02x", opcode, status);
net_buf_unref(buf);
switch (status) {
case BT_HCI_ERR_CONN_LIMIT_EXCEEDED:
return -ECONNREFUSED;
default:
return -EIO;
}
}
BT_DBG("rsp %p opcode 0x%04x len %u", buf, opcode, buf->len);
if (rsp) {
*rsp = buf;
} else {
net_buf_unref(buf);
}
return 0;
}
#if defined(CONFIG_BT_OBSERVER) || defined(CONFIG_BT_BROADCASTER)
const bt_addr_le_t *bt_lookup_id_addr(uint8_t id, const bt_addr_le_t *addr)
{
if (IS_ENABLED(CONFIG_BT_SMP)) {
struct bt_keys *keys;
keys = bt_keys_find_irk(id, addr);
if (keys) {
BT_DBG("Identity %s matched RPA %s",
bt_addr_le_str(&keys->addr),
bt_addr_le_str(addr));
return &keys->addr;
}
}
return addr;
}
#endif /* CONFIG_BT_OBSERVER || CONFIG_BT_BROADCASTER */
#if defined(CONFIG_BT_EXT_ADV)
uint8_t bt_le_ext_adv_get_index(struct bt_le_ext_adv *adv)
{
ptrdiff_t index = adv - adv_pool;
__ASSERT(0 <= index && index < ARRAY_SIZE(adv_pool),
"Invalid bt_adv pointer");
return (uint8_t)index;
}
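/* Allocate the first free advertising set from adv_pool, zero it and mark
 * it as created. Returns NULL when all sets are in use.
 */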
static struct bt_le_ext_adv *adv_new(void)
{
struct bt_le_ext_adv *adv = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(adv_pool); i++) {
if (!atomic_test_bit(adv_pool[i].flags, BT_ADV_CREATED)) {
adv = &adv_pool[i];
break;
}
}
if (!adv) {
return NULL;
}
(void)memset(adv, 0, sizeof(*adv));
atomic_set_bit(adv_pool[i].flags, BT_ADV_CREATED);
adv->handle = i;
return adv;
}
static void adv_delete(struct bt_le_ext_adv *adv)
{
atomic_clear_bit(adv->flags, BT_ADV_CREATED);
}
#if defined(CONFIG_BT_BROADCASTER)
static struct bt_le_ext_adv *bt_adv_lookup_handle(uint8_t handle)
{
if (handle < ARRAY_SIZE(adv_pool) &&
atomic_test_bit(adv_pool[handle].flags, BT_ADV_CREATED)) {
return &adv_pool[handle];
}
return NULL;
}
#endif /* CONFIG_BT_BROADCASTER */
#endif /* defined(CONFIG_BT_EXT_ADV) */
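/* Invoke func for every created advertising set, or for the single legacy
 * set when extended advertising is not compiled in.
 */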
static void bt_adv_foreach(void (*func)(struct bt_le_ext_adv *adv, void *data),
void *data)
{
#if defined(CONFIG_BT_EXT_ADV)
for (size_t i = 0; i < ARRAY_SIZE(adv_pool); i++) {
if (atomic_test_bit(adv_pool[i].flags, BT_ADV_CREATED)) {
func(&adv_pool[i], data);
}
}
#else
func(&bt_dev.adv, data);
#endif /* defined(CONFIG_BT_EXT_ADV) */
}
static struct bt_le_ext_adv *adv_new_legacy(void)
{
#if defined(CONFIG_BT_EXT_ADV)
if (bt_dev.adv) {
return NULL;
}
bt_dev.adv = adv_new();
return bt_dev.adv;
#else
return &bt_dev.adv;
#endif
}
static void adv_delete_legacy(void)
{
#if defined(CONFIG_BT_EXT_ADV)
if (bt_dev.adv) {
atomic_clear_bit(bt_dev.adv->flags, BT_ADV_CREATED);
bt_dev.adv = NULL;
}
#endif
}
struct bt_le_ext_adv *bt_adv_lookup_legacy(void)
{
#if defined(CONFIG_BT_EXT_ADV)
return bt_dev.adv;
#else
return &bt_dev.adv;
#endif
}
static int set_le_adv_enable_legacy(struct bt_le_ext_adv *adv, bool enable)
{
struct net_buf *buf;
struct bt_hci_cmd_state_set state;
int err;
buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_ENABLE, 1);
if (!buf) {
return -ENOBUFS;
}
if (enable) {
net_buf_add_u8(buf, BT_HCI_LE_ADV_ENABLE);
} else {
net_buf_add_u8(buf, BT_HCI_LE_ADV_DISABLE);
}
bt_hci_cmd_state_set_init(&state, adv->flags, BT_ADV_ENABLED, enable);
cmd(buf)->state = &state;
err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_ADV_ENABLE, buf, NULL);
if (err) {
return err;
}
return 0;
}
static int set_random_address(const bt_addr_t *addr)
{
struct net_buf *buf;
int err;
BT_DBG("%s", bt_addr_str(addr));
/* Do nothing if we already have the right address */
if (!bt_addr_cmp(addr, &bt_dev.random_addr.a)) {
return 0;
}
buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, sizeof(*addr));
if (!buf) {
return -ENOBUFS;
}
net_buf_add_mem(buf, addr, sizeof(*addr));
err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_RANDOM_ADDRESS, buf, NULL);
if (err) {
return err;
}
bt_addr_copy(&bt_dev.random_addr.a, addr);
bt_dev.random_addr.type = BT_ADDR_LE_RANDOM;
return 0;
}
static int set_le_adv_enable_ext(struct bt_le_ext_adv *adv,
bool enable,
const struct bt_le_ext_adv_start_param *param)
{
struct net_buf *buf;
struct bt_hci_cmd_state_set state;
int err;
buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_EXT_ADV_ENABLE, 6);
if (!buf) {
return -ENOBUFS;
}
if (enable) {
net_buf_add_u8(buf, BT_HCI_LE_ADV_ENABLE);
} else {
net_buf_add_u8(buf, BT_HCI_LE_ADV_DISABLE);
}
	/* Enable/disable a single advertising set: this handle, with the
	 * requested timeout and maximum number of extended advertising events.
	 */
	net_buf_add_u8(buf, 1);
	net_buf_add_u8(buf, adv->handle);
	/* net_buf_add_le16() already converts to little endian */
	net_buf_add_le16(buf, param ? param->timeout : 0);
	net_buf_add_u8(buf, param ? param->num_events : 0);
bt_hci_cmd_state_set_init(&state, adv->flags, BT_ADV_ENABLED, enable);
cmd(buf)->state = &state;
err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_EXT_ADV_ENABLE, buf, NULL);
if (err) {
return err;
}
return 0;
}
static int set_le_adv_enable(struct bt_le_ext_adv *adv, bool enable)
{
if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
BT_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
return set_le_adv_enable_ext(adv, enable, NULL);
}
return set_le_adv_enable_legacy(adv, enable);
}
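/* Set the random address used by an advertising set. With extended
 * advertising the address is per set; if the set's parameters have not been
 * configured yet, the address is stored and marked as pending so it can be
 * applied once the parameters are set.
 */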
static int set_adv_random_address(struct bt_le_ext_adv *adv,
const bt_addr_t *addr)
{
struct bt_hci_cp_le_set_adv_set_random_addr *cp;
struct net_buf *buf;
int err;
if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
return set_random_address(addr);
}
BT_DBG("%s", bt_addr_str(addr));
if (!atomic_test_bit(adv->flags, BT_ADV_PARAMS_SET)) {
bt_addr_copy(&adv->random_addr.a, addr);
adv->random_addr.type = BT_ADDR_LE_RANDOM;
atomic_set_bit(adv->flags, BT_ADV_RANDOM_ADDR_PENDING);
return 0;
}
buf = bt_hci_cmd_create(BT_HCI_OP_LE_SET_ADV_SET_RANDOM_ADDR,
sizeof(*cp));
if (!buf) {
return -ENOBUFS;
}
cp = net_buf_add(buf, sizeof(*cp));
cp->handle = adv->handle;
bt_addr_copy(&cp->bdaddr, addr);
err = bt_hci_cmd_send_sync(BT_HCI_OP_LE_SET_ADV_SET_RANDOM_ADDR, buf,
NULL);
if (err) {
return err;
}
bt_addr_copy(&adv->random_addr.a, addr);
adv->random_addr.type = BT_ADDR_LE_RANDOM;
return 0;
}
static void adv_rpa_invalidate(struct bt_le_ext_adv *adv, void *data)
{
if (!atomic_test_bit(adv->flags, BT_ADV_LIMITED)) {
atomic_clear_bit(adv->flags, BT_ADV_RPA_VALID);
}
}
static void le_rpa_invalidate(void)
{
	/* Allow the RPA timeout work to be re-submitted the next time an RPA
	 * is generated.
	 */
atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_TIMEOUT_SET);
/* Invalidate RPA */
if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
atomic_test_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED))) {
atomic_clear_bit(bt_dev.flags, BT_DEV_RPA_VALID);
}
bt_adv_foreach(adv_rpa_invalidate, NULL);
}
#if defined(CONFIG_BT_PRIVACY)
static void le_rpa_timeout_submit(void)
{
/* Check if RPA timer is running. */
if (atomic_test_and_set_bit(bt_dev.flags, BT_DEV_RPA_TIMEOUT_SET)) {
return;
}
k_delayed_work_submit(&bt_dev.rpa_update, RPA_TIMEOUT);
}
/* This function sets a new RPA only if the current one is no longer valid. */
static int le_set_private_addr(uint8_t id)
{
bt_addr_t rpa;
int err;
/* check if RPA is valid */
if (atomic_test_bit(bt_dev.flags, BT_DEV_RPA_VALID)) {
return 0;
}
err = bt_rpa_create(bt_dev.irk[id], &rpa);
if (!err) {
err = set_random_address(&rpa);
if (!err) {
atomic_set_bit(bt_dev.flags, BT_DEV_RPA_VALID);
}
}
le_rpa_timeout_submit();
return err;
}
static int le_adv_set_private_addr(struct bt_le_ext_adv *adv)
{
bt_addr_t rpa;
int err;
if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
return le_set_private_addr(adv->id);
}
/* check if RPA is valid */
if (atomic_test_bit(adv->flags, BT_ADV_RPA_VALID)) {
return 0;
}
if (adv == bt_adv_lookup_legacy() && adv->id == BT_ID_DEFAULT) {
		/* Make sure that a legacy advertiser using the default ID has
		 * the same RPA as the scanner roles.
		 */
err = le_set_private_addr(BT_ID_DEFAULT);
if (err) {
return err;
}
err = set_adv_random_address(adv, &bt_dev.random_addr.a);
if (!err) {
atomic_set_bit(adv->flags, BT_ADV_RPA_VALID);
}
return 0;
}
err = bt_rpa_create(bt_dev.irk[adv->id], &rpa);
if (!err) {
err = set_adv_random_address(adv, &rpa);
if (!err) {
atomic_set_bit(adv->flags, BT_ADV_RPA_VALID);
}
}
if (!atomic_test_bit(adv->flags, BT_ADV_LIMITED)) {
le_rpa_timeout_submit();
}
return err;
}
#else
static int le_set_private_addr(uint8_t id)
{
bt_addr_t nrpa;
int err;
err = bt_rand(nrpa.val, sizeof(nrpa.val));
if (err) {
return err;
}
BT_ADDR_SET_NRPA(&nrpa);
return set_random_address(&nrpa);
}
static int le_adv_set_private_addr(struct bt_le_ext_adv *adv)
{
bt_addr_t nrpa;
int err;
err = bt_rand(nrpa.val, sizeof(nrpa.val));
if (err) {
return err;
}
BT_ADDR_SET_NRPA(&nrpa);
return set_adv_random_address(adv, &nrpa);
}
#endif /* defined(CONFIG_BT_PRIVACY) */
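/* Rotate the RPA of an enabled advertising set that is neither limited nor
 * using an identity address. The set is briefly disabled while its random
 * address is updated, then re-enabled.
 */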
static void adv_update_rpa(struct bt_le_ext_adv *adv, void *data)
{
if (atomic_test_bit(adv->flags, BT_ADV_ENABLED) &&
!atomic_test_bit(adv->flags, BT_ADV_LIMITED) &&
!atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
int err;
set_le_adv_enable_ext(adv, false, NULL);
err = le_adv_set_private_addr(adv);
if (err) {
BT_WARN("Failed to update advertiser RPA address (%d)",
err);
}
set_le_adv_enable_ext(adv, true, NULL);
}
}
static void le_update_private_addr(void)
{
struct bt_le_ext_adv *adv = NULL;
bool adv_enabled = false;
uint8_t id = BT_ID_DEFAULT;
int err;
if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
BT_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
bt_adv_foreach(adv_update_rpa, NULL);
}
#if defined(CONFIG_BT_OBSERVER)
bool scan_enabled = false;
if (atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING) &&
atomic_test_bit(bt_dev.flags, BT_DEV_ACTIVE_SCAN) &&
!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
atomic_test_bit(bt_dev.flags, BT_DEV_SCAN_LIMITED))) {
set_le_scan_enable(BT_HCI_LE_SCAN_DISABLE);
scan_enabled = true;
}
#endif
if (IS_ENABLED(CONFIG_BT_CENTRAL) &&
IS_ENABLED(CONFIG_BT_WHITELIST) &&
atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING)) {
		/* The canceled initiating procedure will be restarted by the
		 * connection complete event.
		 */
bt_le_create_conn_cancel();
}
if (!(IS_ENABLED(CONFIG_BT_EXT_ADV) &&
BT_FEAT_LE_EXT_ADV(bt_dev.le.features))) {
adv = bt_adv_lookup_legacy();
if (adv &&
atomic_test_bit(adv->flags, BT_ADV_ENABLED) &&
!atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
adv_enabled = true;
id = adv->id;
set_le_adv_enable_legacy(adv, false);
}
}
	/* If both the advertiser and the scanner are running, the advertiser
	 * ID must be BT_ID_DEFAULT; this updates the RPA address for both
	 * roles.
	 */
err = le_set_private_addr(id);
if (err) {
BT_WARN("Failed to update RPA address (%d)", err);
return;
}
if (adv && adv_enabled) {
set_le_adv_enable_legacy(adv, true);
}
#if defined(CONFIG_BT_OBSERVER)
if (scan_enabled) {
set_le_scan_enable(BT_HCI_LE_SCAN_ENABLE);
}
#endif
}
struct adv_id_check_data {
uint8_t id;
bool adv_enabled;
};
static void adv_id_check_func(struct bt_le_ext_adv *adv, void *data)
{
struct adv_id_check_data *check_data = data;
if (IS_ENABLED(CONFIG_BT_EXT_ADV)) {
/* Only check if the ID is in use, as the advertiser can be
* started and stopped without reconfiguring parameters.
*/
if (check_data->id == adv->id) {
check_data->adv_enabled = true;
}
} else {
if (check_data->id == adv->id &&
atomic_test_bit(adv->flags, BT_ADV_ENABLED)) {
check_data->adv_enabled = true;
}
}
}
static void adv_id_check_connectable_func(struct bt_le_ext_adv *adv, void *data)
{
struct adv_id_check_data *check_data = data;
if (atomic_test_bit(adv->flags, BT_ADV_ENABLED) &&
atomic_test_bit(adv->flags, BT_ADV_CONNECTABLE) &&
check_data->id != adv->id) {
check_data->adv_enabled = true;
}
}
#if defined(CONFIG_BT_SMP)
static void adv_is_limited_enabled(struct bt_le_ext_adv *adv, void *data)
{
bool *adv_enabled = data;
if (atomic_test_bit(adv->flags, BT_ADV_ENABLED) &&
atomic_test_bit(adv->flags, BT_ADV_LIMITED)) {
*adv_enabled = true;
}
}
static void adv_pause_enabled(struct bt_le_ext_adv *adv, void *data)
{
if (atomic_test_bit(adv->flags, BT_ADV_ENABLED)) {
atomic_set_bit(adv->flags, BT_ADV_PAUSED);
set_le_adv_enable(adv, false);
}
}
static void adv_unpause_enabled(struct bt_le_ext_adv *adv, void *data)
{
if (atomic_test_and_clear_bit(adv->flags, BT_ADV_PAUSED)) {
set_le_adv_enable(adv, true);
}
}
#endif /* defined(CONFIG_BT_SMP) */
#if defined(CONFIG_BT_PRIVACY)
static void adv_is_private_enabled(struct bt_le_ext_adv *adv, void *data)
{
bool *adv_enabled = data;
if (atomic_test_bit(adv->flags, BT_ADV_ENABLED) &&
!atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY)) {
*adv_enabled = true;
}
}
static void rpa_timeout(struct k_work *work)
{
bool adv_enabled = false;
BT_DBG("");
if (IS_ENABLED(CONFIG_BT_CENTRAL)) {
struct bt_conn *conn =
bt_conn_lookup_state_le(BT_ID_DEFAULT, NULL,
BT_CONN_CONNECT_SCAN);
if (conn) {
bt_conn_unref(conn);
bt_le_create_conn_cancel();
}
}
le_rpa_invalidate();
bt_adv_foreach(adv_is_private_enabled, &adv_enabled);
	/* If no role using the RPA is running we can stop the RPA timer */
if (!(adv_enabled ||
atomic_test_bit(bt_dev.flags, BT_DEV_INITIATING) ||
(atomic_test_bit(bt_dev.flags, BT_DEV_SCANNING) &&
atomic_test_bit(bt_dev.flags, BT_DEV_ACTIVE_SCAN)))) {
return;
}
le_update_private_addr();
}
#endif /* CONFIG_BT_PRIVACY */
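/* Check whether the scanner or initiator can be started without conflicting
 * with the random address currently used by the legacy advertiser.
 */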
bool bt_le_scan_random_addr_check(void)
{
struct bt_le_ext_adv *adv;
if (IS_ENABLED(CONFIG_BT_EXT_ADV) &&
BT_FEAT_LE_EXT_ADV(bt_dev.le.features)) {
		/* Advertiser and scanner use different random addresses */
return true;
}
adv = bt_adv_lookup_legacy();
if (!adv) {
return true;
}
/* If the advertiser is not enabled or not active there is no issue */
if (!IS_ENABLED(CONFIG_BT_BROADCASTER) ||
!atomic_test_bit(adv->flags, BT_ADV_ENABLED)) {
return true;
}
/* When privacy is enabled the random address will not be set
* immediately before starting the role, because the RPA might still be
* valid and only updated on RPA timeout.
*/
if (IS_ENABLED(CONFIG_BT_PRIVACY)) {
		/* Cannot start the scanner or initiator if the random address
		 * is used by the advertiser for an RPA with a different
		 * identity or for a random static identity address.
		 */
if ((atomic_test_bit(adv->flags, BT_ADV_USE_IDENTITY) &&
bt_dev.id_addr[adv->id].type == BT_ADDR_LE_RANDOM) ||
adv->id != BT_ID_DEFAULT) {
return false;
}
}
	/* If privacy is not enabled, the host attempts to set the random
	 * address before enabling the role. If another role is already using
	 * the random address then that command will fail, and its error code
	 * should be returned to the application.
	 */
return true;
}
static bool bt_le_adv_random_addr_check(const struct bt_le_adv_param *